Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig                      |   13
-rw-r--r--  drivers/gpu/drm/i915/Makefile                     |    3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c             |   29
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c                    |    4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c                    |    8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h                    |    2
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c               |   26
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c               |    6
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c                 |   21
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c              |   28
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c                   |    4
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c            |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c               |  295
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c                   |   72
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                   |  466
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                   | 2332
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h                   |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c        |   39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c           |   91
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c            |  150
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c             |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c        |   77
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c             |   31
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c               |  323
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h               |   12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c          |  170
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c      |  186
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h      |    4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c           |  583
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h           |  124
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c          |   99
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c            |   51
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c            |   47
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c (renamed from drivers/gpu/drm/i915/i915_gem_dmabuf.h) | 56
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h          |   72
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c           |  121
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c             |  101
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c        |  648
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                   |  597
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c                   |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                   |  260
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c              |   40
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h              |    8
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h                 |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c         |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c                |  112
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c          |   43
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c                  |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c                  |   11
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                  |  362
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c          |    5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c              |  678
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                   |  258
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c             |  559
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c             |   84
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h                  |  107
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c                  |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c            |   29
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c                  |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c        |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h                  |   32
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h             |   82
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c           |   10
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c            |  450
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c                 |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c                  |  162
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h                  |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c               |   57
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c                 |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c              |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                   |  587
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c           |  273
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h           |   91
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c           |  154
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c                 |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c               |   91
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c                   |    6
77 files changed, 6751 insertions, 4782 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 1c1b19c..df96aed 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -11,6 +11,7 @@ config DRM_I915
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
+ select RELAY
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
@@ -24,16 +25,17 @@ config DRM_I915
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
- If M is selected, the module will be called i915. AGP support
- is required for this driver to work. This driver is used by
- the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
- replaces the older i830 module that supported a subset of the
- hardware in older X.org releases.
+
+ This driver is used by the Intel driver in X.org 6.8 and
+ XFree86 4.4 and above. It replaces the older i830 module that
+ supported a subset of the hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
+ If "M" is selected, the module will be called i915.
+
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
@@ -85,6 +87,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on 64BIT
default n
help
Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6123400..0857e50 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,16 +35,19 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_fence.o \
i915_gem_gtt.o \
+ i915_gem_internal.o \
i915_gem.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_gem_timeline.o \
i915_gem_userptr.o \
i915_trace_points.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
+ intel_hangcheck.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index aafb57e..0084ece 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1145,7 +1145,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->event = PRIMARY_B_FLIP_DONE;
break;
case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
- info->pipe = PIPE_B;
+ info->pipe = PIPE_C;
info->event = PRIMARY_C_FLIP_DONE;
break;
default:
@@ -1201,20 +1201,19 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
-#define write_bits(reg, e, s, v) do { \
- vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
- vgpu_vreg(vgpu, reg) |= (v << s); \
-} while (0)
-
- write_bits(info->surf_reg, 31, 12, info->surf_val);
- if (IS_SKYLAKE(dev_priv))
- write_bits(info->stride_reg, 9, 0, info->stride_val);
- else
- write_bits(info->stride_reg, 15, 6, info->stride_val);
- write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
- 10, info->tile_val);
-
-#undef write_bits
+ set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ info->surf_val << 12);
+ if (IS_SKYLAKE(dev_priv)) {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ info->stride_val);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ info->tile_val << 10);
+ } else {
+ set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ info->stride_val << 6);
+ set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ info->tile_val << 10);
+ }
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
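
[Editor's note: the hunk above replaces the open-coded write_bits() macro with the kernel's generic set_mask_bits() helper from <linux/bitops.h>, which clears a mask and ORs in pre-shifted bits in a single atomic update. A minimal user-space sketch of its semantics follows; the kernel version retries with cmpxchg so concurrent updaters are safe, and the names below are illustrative, not from the patch.]

#include <stdint.h>
#include <stdio.h>

/* Non-atomic sketch of set_mask_bits(ptr, mask, bits); callers pass
 * "bits" already shifted into position, e.g. info->surf_val << 12. */
static void set_mask_bits_sketch(uint32_t *ptr, uint32_t mask, uint32_t bits)
{
	*ptr = (*ptr & ~mask) | bits;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* GENMASK(31, 12) == 0xfffff000: replace the surface-address field
	 * while preserving the low bits, as in the hunk above. */
	set_mask_bits_sketch(&reg, 0xfffff000u, 0x12345u << 12);
	printf("reg = 0x%08x\n", reg);	/* prints reg = 0x12345eef */
	return 0;
}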
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2cc7613..6554da9 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -276,7 +276,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
pte = readq(addr);
#else
pte = ioread32(addr);
- pte |= ioread32(addr + 4) << 32;
+ pte |= (u64)ioread32(addr + 4) << 32;
#endif
return pte;
}
@@ -1944,7 +1944,7 @@ static int create_scratch_page(struct intel_vgpu *vgpu)
mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
+ gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
__free_page(gtt->scratch_page);
gtt->scratch_page = NULL;
return -ENXIO;
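
[Editor's note: the read_pte64() fix above matters on 32-bit builds, where readq() is unavailable and the PTE is assembled from two 32-bit reads. Shifting a 32-bit value left by 32 is undefined behaviour in C, so without the (u64) cast the high dword of the PTE is lost. A runnable user-space illustration:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x89abcdef, hi = 0x01234567;

	/* Buggy: "hi << 32" shifts a 32-bit value by its full width,
	 * which is undefined behaviour and typically discards hi. */
	/* uint64_t bad = lo | (hi << 32); */

	/* Fixed, as in the hunk above: widen before shifting. */
	uint64_t pte = lo | ((uint64_t)hi << 32);

	printf("pte = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}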
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 31b59d4..385969a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -65,6 +65,8 @@ struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
*/
int intel_gvt_init_host(void)
{
+ int ret;
+
if (intel_gvt_host.initialized)
return 0;
@@ -90,7 +92,8 @@ int intel_gvt_init_host(void)
return -EINVAL;
/* Try to detect if we're running in host instead of VM. */
- if (!intel_gvt_hypervisor_detect_host())
+ ret = intel_gvt_hypervisor_detect_host();
+ if (ret)
return -ENODEV;
gvt_dbg_core("Running with hypervisor %s in host mode\n",
@@ -103,19 +106,20 @@ int intel_gvt_init_host(void)
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
- info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
+ info->msi_cap_offset = pdev->msi_cap;
}
static int gvt_service_thread(void *data)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 11df62b..62fc9e3 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -382,6 +382,8 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
#include "mpt.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 3e74fb3..9ab1f95 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -239,7 +239,11 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
vgpu->resetting = true;
intel_vgpu_stop_schedule(vgpu);
- if (scheduler->current_vgpu == vgpu) {
+ /*
+ * The current_vgpu will set to NULL after stopping the
+ * scheduler when the reset is triggered by current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
@@ -247,6 +251,16 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
intel_vgpu_reset_execlist(vgpu, bitmap);
+ /* full GPU reset */
+ if (bitmap == 0xff) {
+ mutex_unlock(&vgpu->gvt->lock);
+ intel_vgpu_clean_gtt(vgpu);
+ mutex_lock(&vgpu->gvt->lock);
+ setup_vgpu_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+ intel_vgpu_init_gtt(vgpu);
+ }
+
vgpu->resetting = false;
return 0;
@@ -258,6 +272,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 data;
u64 bitmap = 0;
+ write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
@@ -1305,7 +1320,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
- int ret;
+ int ret = 0;
if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
return -EINVAL;
@@ -1313,12 +1328,15 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
execlist = &vgpu->execlist[ring_id];
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
- if (execlist->elsp_dwords.index == 3)
+ if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ if(ret)
+ gvt_err("fail submit workload on ring %d\n", ring_id);
+ }
++execlist->elsp_dwords.index;
execlist->elsp_dwords.index &= 0x3;
- return 0;
+ return ret;
}
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 973c8a9..9521891 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -163,7 +163,7 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
*/
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
- iounmap(gvt->opregion.opregion_va);
+ memunmap(gvt->opregion.opregion_va);
gvt->opregion.opregion_va = NULL;
}
@@ -181,8 +181,8 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt)
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
&gvt->opregion.opregion_pa);
- gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
- INTEL_GVT_OPREGION_SIZE);
+ gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+ INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
if (!gvt->opregion.opregion_va) {
gvt_err("fail to map host opregion\n");
return -EFAULT;
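
[Editor's note: the opregion.c change above swaps acpi_os_ioremap()/iounmap() for memremap()/memunmap(): the OpRegion lives in ordinary system RAM rather than MMIO space, so it wants a regular cached mapping. A hedged kernel-style sketch of the pairing; the helper names here are illustrative, not from the patch.]

#include <linux/io.h>

/* Illustrative helpers: map/unmap a RAM-backed firmware region such as
 * the ACPI OpRegion. */
static void *map_fw_region(resource_size_t pa, size_t len)
{
	/* MEMREMAP_WB requests a normal write-back cached mapping,
	 * appropriate for plain system memory; ioremap()/iounmap() are
	 * reserved for device MMIO. */
	return memremap(pa, len, MEMREMAP_WB);
}

static void unmap_fw_region(void *va)
{
	memunmap(va);	/* pairs with memremap(), not iounmap() */
}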
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index feebb65..3af894b 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -118,6 +118,7 @@ static u32 gen9_render_mocs_L3[32];
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
[RCS] = 0x4260,
@@ -135,11 +136,25 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
reg = _MMIO(regs[ring_id]);
- I915_WRITE(reg, 0x1);
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * we need to put a forcewake when invalidating RCS TLB caches,
+ * otherwise device can go to RC6 state and interrupt invalidation
+ * process
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ fw |= FORCEWAKE_RENDER;
- if (wait_for_atomic((I915_READ(reg) == 0), 50))
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ intel_uncore_forcewake_put(dev_priv, fw);
+
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@@ -162,6 +177,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
if (!IS_SKYLAKE(dev_priv))
return;
+ offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
@@ -199,6 +215,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
if (!IS_SKYLAKE(dev_priv))
return;
+ offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e96eaee..18acb45 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -400,21 +400,27 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
+ long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(scheduler->waitq[ring_id],
- kthread_should_stop() ||
- (workload = pick_next_workload(gvt, ring_id)));
-
- WARN_ON_ONCE(ret);
-
- if (kthread_should_stop())
+ add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ do {
+ workload = pick_next_workload(gvt, ring_id);
+ if (workload)
+ break;
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ } while (!kthread_should_stop());
+ remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+ if (!workload)
break;
mutex_lock(&scheduler_mutex);
@@ -444,10 +450,12 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
- workload->status = i915_wait_request(workload->req,
- 0, NULL, NULL);
- if (workload->status != 0)
+ lret = i915_wait_request(workload->req,
+ 0, MAX_SCHEDULE_TIMEOUT);
+ if (lret < 0) {
+ workload->status = lret;
gvt_err("fail to wait workload, skip\n");
+ }
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
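
[Editor's note: the workload_thread hunk above rewrites the wait using the wait_woken() idiom from <linux/wait.h>. Its property is that a wake-up arriving between the condition check and the sleep is recorded in the wait entry (via woken_wake_function), so wait_woken() returns immediately instead of losing it. A hedged sketch of the general pattern; fetch_work() and "queue" are illustrative stand-ins, not names from the patch.]

#include <linux/kthread.h>
#include <linux/wait.h>

extern void *fetch_work(void);	/* illustrative condition check */

static void *wait_for_work(wait_queue_head_t *queue)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	void *work = NULL;

	add_wait_queue(queue, &wait);
	do {
		work = fetch_work();	/* check the condition first... */
		if (work)
			break;
		/* ...then sleep; a wake-up that raced with the check above
		 * is remembered in "wait", so this returns at once. */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	} while (!kthread_should_stop());
	remove_wait_queue(queue, &wait);

	return work;
}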
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 9401436..4f54005 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -41,7 +41,7 @@ static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
-static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
@@ -103,7 +103,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
}
}
-static void populate_pvinfo_page(struct intel_vgpu *vgpu)
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f191d7b..f5039f4 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
if (ret == 0 && needs_clflush_after)
- drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+ drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 20638d2..c9465fb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -107,12 +107,12 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->fault_mappable ? 'g' : ' ';
+ return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
- return obj->mapping ? 'M' : ' ';
+ return obj->mm.mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -136,11 +136,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
- enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
+ seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
get_pin_flag(obj),
@@ -149,17 +148,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
- obj->base.write_domain);
- for_each_engine(engine, dev_priv, id)
- seq_printf(m, "%x ",
- i915_gem_active_get_seqno(&obj->last_read[id],
- &obj->base.dev->struct_mutex));
- seq_printf(m, "] %x %s%s%s",
- i915_gem_active_get_seqno(&obj->last_write,
- &obj->base.dev->struct_mutex),
+ obj->base.write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
- obj->dirty ? " dirty" : "",
- obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ obj->mm.dirty ? " dirty" : "",
+ obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -187,8 +179,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- engine = i915_gem_active_get_engine(&obj->last_write,
- &dev_priv->drm.struct_mutex);
+ engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
@@ -226,7 +217,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -236,7 +227,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
if (obj->stolen == NULL)
continue;
@@ -399,16 +390,16 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
size += obj->base.size;
++count;
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -416,7 +407,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
size += obj->base.size;
++count;
@@ -425,12 +416,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count;
}
- if (obj->madv == I915_MADV_DONTNEED) {
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
- if (obj->mapping) {
+ if (obj->mm.mapping) {
mapped_count++;
mapped_size += obj->base.size;
}
@@ -502,7 +493,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (show_pin_display_only && !obj->pin_display)
continue;
@@ -561,7 +552,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
- dev_priv->next_seqno,
+ atomic_read(&dev_priv->gt.global_timeline.next_seqno),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@@ -640,17 +631,10 @@ static void print_request(struct seq_file *m,
struct drm_i915_gem_request *rq,
const char *prefix)
{
- struct pid *pid = rq->ctx->pid;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
- seq_printf(m, "%s%x [%x:%x] @ %d: %s [%d]\n", prefix,
- rq->fence.seqno, rq->ctx->hw_id, rq->fence.seqno,
+ seq_printf(m, "%s%x [%x:%x] @ %d: %s\n", prefix,
+ rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- task ? task->comm : "<unknown>",
- task ? task->pid : -1);
- rcu_read_unlock();
+ rq->timeline->common->name);
}
static int i915_gem_request_info(struct seq_file *m, void *data)
@@ -671,13 +655,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, link)
+ list_for_each_entry(req, &engine->timeline->requests, link)
print_request(m, req, " ");
any++;
@@ -699,14 +683,14 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -743,17 +727,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
+ intel_display_power_put(dev_priv, power_domain);
+ }
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -1046,15 +1045,8 @@ static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
- *val = dev_priv->next_seqno;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ *val = atomic_read(&dev_priv->gt.global_timeline.next_seqno);
return 0;
}
@@ -1069,7 +1061,7 @@ i915_next_seqno_set(void *data, u64 val)
if (ret)
return ret;
- ret = i915_gem_set_seqno(dev, val);
+ ret = i915_gem_set_global_seqno(dev, val);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1356,21 +1348,20 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
- engine->hangcheck.seqno,
- seqno[id],
- engine->last_submitted_seqno);
+ engine->hangcheck.seqno, seqno[id],
+ intel_engine_last_submit(engine));
seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)));
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
@@ -1396,14 +1387,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
@@ -1411,7 +1397,6 @@ static int ironlake_drpc_info(struct seq_file *m)
crstandvid = I915_READ16(CRSTANDVID);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@@ -1674,11 +1659,13 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (intel_fbc_is_active(dev_priv) &&
- INTEL_GEN(dev_priv) >= 7)
+ if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
+ uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
+ BDW_FBC_COMPRESSION_MASK :
+ IVB_FBC_COMPRESSION_MASK;
seq_printf(m, "Compressing: %s\n",
- yesno(I915_READ(FBC_STATUS2) &
- FBC_COMPRESSION_MASK));
+ yesno(I915_READ(FBC_STATUS2) & mask));
+ }
mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
@@ -1757,6 +1744,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1770,6 +1758,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
seq_printf(m, "self-refresh: %s\n",
@@ -2015,7 +2004,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma));
- if (i915_gem_object_get_pages(vma->obj)) {
+ if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
@@ -2034,6 +2023,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state);
}
+ i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n');
}
@@ -2091,12 +2081,7 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2136,7 +2121,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -2292,8 +2276,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
- seq_printf(m, "GPU busy? %s [%x]\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
+ seq_printf(m, "GPU busy? %s [%d requests]\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
@@ -2328,7 +2312,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
if (INTEL_GEN(dev_priv) >= 6 &&
dev_priv->rps.enabled &&
- dev_priv->gt.active_engines) {
+ dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2409,6 +2393,32 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
return 0;
}
+static void i915_guc_log_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ seq_puts(m, "\nGuC logging stats:\n");
+
+ seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_ISR_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
+
+ seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_DPC_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
+
+ seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
+ guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
+ guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
+
+ seq_printf(m, "\tTotal flush interrupt count: %u\n",
+ guc->log.flush_interrupt_count);
+
+ seq_printf(m, "\tCapture miss count: %u\n",
+ guc->log.capture_miss_count);
+}
+
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
@@ -2482,6 +2492,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
i915_guc_client_info(m, dev_priv, &client);
+ i915_guc_log_info(m, dev_priv);
+
/* Add more as required ... */
return 0;
@@ -2493,10 +2505,10 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
int i = 0, pg;
- if (!dev_priv->guc.log_vma)
+ if (!dev_priv->guc.log.vma)
return 0;
- obj = dev_priv->guc.log_vma->obj;
+ obj = dev_priv->guc.log.vma->obj;
for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
@@ -2513,6 +2525,44 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
+static int i915_guc_log_control_get(void *data, u64 *val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ *val = i915.guc_log_level;
+
+ return 0;
+}
+
+static int i915_guc_log_control_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (!dev_priv->guc.log.vma)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+ ret = i915_guc_log_control(dev_priv, val);
+ intel_runtime_pm_put(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
+ i915_guc_log_control_get, i915_guc_log_control_set,
+ "%lld\n");
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2542,11 +2592,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain))
+ continue;
+
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
@@ -3094,6 +3155,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_runtime_pm_get(dev_priv);
+
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
@@ -3103,7 +3166,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "%s\n", engine->name);
seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
intel_engine_get_seqno(engine),
- engine->last_submitted_seqno,
+ intel_engine_last_submit(engine),
engine->hangcheck.seqno,
engine->hangcheck.score);
@@ -3111,14 +3174,14 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "\tRequests:\n");
- rq = list_first_entry(&engine->request_list,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->request_list)
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tfirst ");
- rq = list_last_entry(&engine->request_list,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->request_list)
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
print_request(m, rq, "\t\tlast ");
rq = i915_gem_find_active_request(engine);
@@ -3192,6 +3255,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
else
seq_printf(m, "\t\tELSP[1] idle\n");
rcu_read_unlock();
+
+ spin_lock_irq(&engine->execlist_lock);
+ list_for_each_entry(rq, &engine->execlist_queue, execlist_link) {
+ print_request(m, rq, "\t\tQ ");
+ }
+ spin_unlock_irq(&engine->execlist_lock);
} else if (INTEL_GEN(dev_priv) > 6) {
seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
@@ -3201,18 +3270,20 @@ static int i915_engine_info(struct seq_file *m, void *unused)
I915_READ(RING_PP_DIR_DCLV(engine)));
}
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
seq_puts(m, "\n");
}
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -3274,15 +3345,6 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
- seq_puts(m, "\nSync seqno:\n");
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < num_rings; j++)
- seq_printf(m, " 0x%08x ",
- engine->semaphore.sync_seqno[j]);
- seq_putc(m, '\n');
- }
- seq_putc(m, '\n');
-
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3375,7 +3437,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
for_each_pipe(dev_priv, pipe) {
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
entry->start, entry->end,
@@ -4009,8 +4071,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
int ret = 0;
@@ -4076,10 +4137,8 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -4150,15 +4209,15 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
if (crtc->base.state->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
@@ -4798,13 +4857,9 @@ i915_wedged_set(void *data, u64 val)
if (i915_reset_in_progress(&dev_priv->gpu_error))
return -EAGAIN;
- intel_runtime_pm_get(dev_priv);
-
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
- intel_runtime_pm_put(dev_priv);
-
return 0;
}
@@ -4872,10 +4927,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
-#define DROP_ALL (DROP_UNBOUND | \
- DROP_BOUND | \
- DROP_RETIRE | \
- DROP_ACTIVE)
+#define DROP_FREED 0x10
+#define DROP_ALL (DROP_UNBOUND | \
+ DROP_BOUND | \
+ DROP_RETIRE | \
+ DROP_ACTIVE | \
+ DROP_FREED)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4919,6 +4976,11 @@ i915_drop_caches_set(void *data, u64 val)
unlock:
mutex_unlock(&dev->struct_mutex);
+ if (val & DROP_FREED) {
+ synchronize_rcu();
+ flush_work(&dev_priv->mm.free_work);
+ }
+
return ret;
}
@@ -5039,22 +5101,16 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
- int ret;
if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5414,7 +5470,8 @@ static const struct i915_debugfs_files {
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops}
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_guc_log_control", &i915_guc_log_control_fops}
};
void intel_display_crc_init(struct drm_i915_private *dev_priv)
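
[Editor's note: the new DROP_FREED flag above drains the deferred-free machinery this series introduces elsewhere (see the free_list/free_work members and the rcu_head/llist_node union in the i915_drv.h hunks below, and the rcu_barrier() + flush_work() pair in i915_gem_fini): freed objects are pushed onto a lock-free llist and reaped from a worker after an RCU grace period. A hedged sketch of that general pattern, with illustrative names:]

#include <linux/llist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct llist_node freed;
	/* ... */
};

static LLIST_HEAD(free_list);
static void free_worker(struct work_struct *work);
static DECLARE_WORK(free_work, free_worker);

static void obj_free_deferred(struct obj *o)
{
	/* llist_add() returns true when the list was empty, so only the
	 * first entry needs to kick the worker. */
	if (llist_add(&o->freed, &free_list))
		schedule_work(&free_work);
}

static void free_worker(struct work_struct *work)
{
	struct llist_node *freed = llist_del_all(&free_list);
	struct obj *o, *tmp;

	llist_for_each_entry_safe(o, tmp, freed, freed)
		kfree(o);
}

/* A debugfs "drop freed" hook then only has to flush the pipeline:
 *	synchronize_rcu();
 *	flush_work(&free_work);
 */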
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 912d534..b72c24f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -537,14 +537,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static void i915_gem_fini(struct drm_device *dev)
+static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_cleanup_engines(&dev_priv->drm);
+ i915_gem_context_fini(&dev_priv->drm);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ rcu_barrier();
+ flush_work(&dev_priv->mm.free_work);
- WARN_ON(!list_empty(&to_i915(dev)->context_list));
+ WARN_ON(!list_empty(&dev_priv->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -592,7 +595,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- intel_modeset_init(dev);
+ ret = intel_modeset_init(dev);
+ if (ret)
+ goto cleanup_irq;
intel_guc_init(dev);
@@ -619,7 +624,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
if (i915_gem_suspend(dev))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
cleanup_irq:
intel_guc_fini(dev);
drm_irq_uninstall(dev);
@@ -825,10 +830,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
+ intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(&dev_priv->drm);
+ if (ret < 0)
+ goto err_gvt;
intel_display_crc_init(dev_priv);
@@ -838,6 +846,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
return 0;
+err_gvt:
+ intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -972,7 +982,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
@@ -1030,7 +1039,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1111,6 +1120,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ i915_guc_register(dev_priv);
i915_setup_sysfs(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1149,6 +1159,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_opregion_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
+ i915_guc_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1303,7 +1314,7 @@ void i915_driver_unload(struct drm_device *dev)
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
- i915_gem_fini(dev);
+ i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1425,7 +1436,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev);
+ intel_suspend_hw(dev_priv);
i915_gem_suspend_gtt_mappings(dev);
@@ -1589,6 +1600,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_resume(dev);
+ drm_kms_helper_poll_enable(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@@ -1596,8 +1609,6 @@ static int i915_drm_resume(struct drm_device *dev)
* notifications.
* */
intel_hpd_init(dev_priv);
- /* Config may have changed between suspend and resume */
- drm_helper_hpd_irq_event(dev);
intel_opregion_register(dev_priv);
@@ -1610,7 +1621,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
- drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2254,7 +2264,6 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
- struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@@ -2278,10 +2287,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
vlv_check_no_gt_access(dev_priv);
- if (rpm_resume) {
- intel_init_clock_gating(dev);
- i915_gem_restore_fences(dev);
- }
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
return ret;
}
@@ -2301,32 +2308,13 @@ static int intel_runtime_suspend(struct device *kdev)
DRM_DEBUG_KMS("Suspending device\n");
- /*
- * We could deadlock here in case another thread holding struct_mutex
- * calls RPM suspend concurrently, since the RPM suspend will wait
- * first for this RPM suspend to finish. In this case the concurrent
- * RPM resume will be followed by its RPM suspend counterpart. Still
- * for consistency return -EAGAIN, which will reschedule this suspend.
- */
- if (!mutex_trylock(&dev->struct_mutex)) {
- DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
- /*
- * Bump the expiration timestamp, otherwise the suspend won't
- * be rescheduled.
- */
- pm_runtime_mark_last_busy(kdev);
-
- return -EAGAIN;
- }
-
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
- i915_gem_release_all_mmaps(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev);
@@ -2591,7 +2579,7 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
- .gem_free_object = i915_gem_free_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f022f43..80775b8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
@@ -62,6 +63,7 @@
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#include "intel_gvt.h"
@@ -70,8 +72,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20161024"
-#define DRIVER_TIMESTAMP 1477290335
+#define DRIVER_DATE "20161108"
+#define DRIVER_TIMESTAMP 1478587895
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -183,7 +185,7 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
enum port {
PORT_NONE = -1,
@@ -312,7 +314,7 @@ struct i915_hotplug {
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
for_each_if ((__mask) & (1 << (__p)))
-#define for_each_plane(__dev_priv, __pipe, __p) \
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
@@ -492,8 +494,8 @@ struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
- int (*get_display_clock_speed)(struct drm_device *dev);
- int (*get_fifo_size)(struct drm_device *dev, int plane);
+ int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+ int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
@@ -501,7 +503,7 @@ struct drm_i915_display_funcs {
void (*initial_watermarks)(struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct drm_atomic_state *state);
- void (*update_wm)(struct drm_crtc *crtc);
+ void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -523,7 +525,7 @@ struct drm_i915_display_funcs {
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
- void (*init_clock_gating)(struct drm_device *dev);
+ void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
@@ -668,6 +670,7 @@ struct intel_csr {
func(is_kabylake); \
func(is_preliminary); \
/* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
@@ -746,6 +749,8 @@ struct intel_display_error_state;
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
+ struct timeval boottime;
+ struct timeval uptime;
struct drm_i915_private *i915;
@@ -778,6 +783,7 @@ struct drm_i915_error_state {
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore;
+ struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
@@ -797,7 +803,6 @@ struct drm_i915_error_state {
u32 cpu_ring_tail;
u32 last_seqno;
- u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@@ -933,6 +938,7 @@ struct i915_gem_context {
struct drm_i915_file_private *file_priv;
struct i915_hw_ppgtt *ppgtt;
struct pid *pid;
+ const char *name;
struct i915_ctx_hang_stats hang_stats;
@@ -1319,6 +1325,12 @@ struct i915_power_well {
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
+ /* unique identifier for this power well */
+ unsigned long id;
+ /*
+ * Arbitraty data associated with this power well. Platform and power
+ * well specific.
+ */
unsigned long data;
const struct i915_power_well_ops *ops;
};
@@ -1356,11 +1368,22 @@ struct i915_gem_mm {
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU) but still have
- * (presumably uncached) pages still attached.
+ * are idle and not used by the GPU). These objects may or may
+ * not actually have any pages attached.
*/
struct list_head unbound_list;
+ /** List of all objects in gtt_space, currently mmaped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /**
+ * List of objects which are pending destruction.
+ */
+ struct llist_head free_list;
+ struct work_struct free_work;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1409,6 +1432,9 @@ struct i915_error_state_file_priv {
struct drm_i915_error_state *error;
};
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1682,7 +1708,6 @@ struct skl_wm_level {
*/
struct i915_runtime_pm {
atomic_t wakeref_count;
- atomic_t atomic_seq;
bool suspended;
bool irqs_enabled;
};
@@ -1807,7 +1832,6 @@ struct drm_i915_private {
struct i915_gem_context *kernel_context;
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct i915_vma *semaphore;
- u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -1832,8 +1856,10 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
- u32 pm_irq_mask;
+ u32 pm_imr;
+ u32 pm_ier;
u32 pm_rps_events;
+ u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1910,8 +1936,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
- struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
+ struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
@@ -2065,6 +2091,10 @@ struct drm_i915_private {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
+ struct list_head timelines;
+ struct i915_gem_timeline global_timeline;
+ u32 active_requests;
+
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
@@ -2072,7 +2102,6 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
- unsigned int active_engines;
bool awake;
/**
@@ -2092,6 +2121,8 @@ struct drm_i915_private {
* off the idle_work.
*/
struct delayed_work idle_work;
+
+ ktime_t last_init_time;
} gt;
/* perform PHY state sanity checks? */
@@ -2151,6 +2182,7 @@ enum hdmi_force_audio {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -2165,8 +2197,8 @@ struct drm_i915_gem_object_ops {
* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *);
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
@@ -2200,10 +2232,20 @@ struct drm_i915_gem_object {
/** List of VMAs backed by this object */
struct list_head vma_list;
+ struct rb_root vma_tree;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
- struct list_head global_list;
+ struct list_head global_link;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ struct list_head userfault_link;
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
@@ -2211,33 +2253,12 @@ struct drm_i915_gem_object {
struct list_head batch_pool_link;
unsigned long flags;
- /**
- * This is set if the object is on the active lists (has pending
- * rendering and so a non-zero seqno), and is not set if it is on
- * inactive (ready to be unbound) list.
- */
-#define I915_BO_ACTIVE_SHIFT 0
-#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
-#define __I915_BO_ACTIVE(bo) \
- ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
/**
- * This is set if the object has been written to since last bound
- * to the GTT
+ * Have we taken a reference on the object for incomplete GPU
+ * activity?
*/
- unsigned int dirty:1;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * Whether the current gtt mapping needs to be mappable (and isn't just
- * mappable by accident). Track pin and fault separate for a more
- * accurate mappable working set.
- */
- unsigned int fault_mappable:1;
+#define I915_BO_ACTIVE_REF 0
/*
* Is the object to be mapped as read-only to the GPU
@@ -2258,15 +2279,41 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
unsigned int bind_count;
+ unsigned int active_count;
unsigned int pin_display;
- struct sg_table *pages;
- int pages_pin_count;
- struct get_page {
- struct scatterlist *sg;
- int last;
- } get_page;
- void *mapping;
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
@@ -2278,8 +2325,7 @@ struct drm_i915_gem_object {
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
*/
- struct i915_gem_active last_read[I915_NUM_ENGINES];
- struct i915_gem_active last_write;
+ struct reservation_object *resv;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
@@ -2290,8 +2336,6 @@ struct drm_i915_gem_object {
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
- unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
@@ -2300,6 +2344,8 @@ struct drm_i915_gem_object {
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
+
+ struct reservation_object __builtin_resv;
};
static inline struct drm_i915_gem_object *
@@ -2311,10 +2357,38 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @file: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @file, NULL
+ * otherwise. The pointer is only valid whilst under the RCU read lock, and
+ * note carefully that the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
- return to_intel_bo(drm_gem_object_lookup(file, handle));
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
}
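As an illustrative sketch (not part of the patch; example_peek_object_size is a hypothetical caller), the RCU variant is intended to be consumed with every dereference kept inside the read-side critical section, since the object may be mid-destruction:

static u64 example_peek_object_size(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;
        u64 size = 0;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        if (obj)
                size = obj->base.size; /* only valid under rcu_read_lock() */
        rcu_read_unlock();

        return size;
}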
__deprecated
@@ -2336,59 +2410,61 @@ __attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
- drm_gem_object_unreference(&obj->base);
+ __drm_gem_object_unreference(&obj->base);
}
__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_unreference_unlocked(&obj->base);
-}
-
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}
-static inline unsigned long
-i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_get_active(obj);
+ return obj->active_count;
}
-static inline void
-i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
- obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
static inline void
-i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
- obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
-static inline bool
-i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
- int engine)
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
- return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
@@ -2407,6 +2483,23 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
return obj->tiling_and_stride & STRIDE_MASK;
}
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
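A hypothetical caller (illustration only, not in the patch) might use the racy last-write query for debug output; the answer is only a hint, as the fence may be signaled at any moment:

static void example_print_last_writer(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine;

        engine = i915_gem_object_last_write_engine(obj);
        pr_debug("last writer: %s\n", engine ? engine->name : "idle");
}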
+
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
i915_gem_object_get(vma->obj);
@@ -2415,7 +2508,6 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
static inline void i915_vma_put(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
i915_gem_object_put(vma->obj);
}
@@ -2445,6 +2537,14 @@ static __always_inline struct sgt_iter {
return s;
}
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
/**
* __sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
@@ -2459,9 +2559,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
- return sg_is_last(sg) ? NULL :
- likely(!sg_is_chain(++sg)) ? sg :
- sg_chain_ptr(sg);
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
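For illustration (example_count_sg_pages is hypothetical and assumes the pages are pinned), the chain-aware helper walks a possibly chained table like so:

static unsigned long example_count_sg_pages(struct sg_table *pages)
{
        struct scatterlist *sg;
        unsigned long npages = 0;

        for (sg = pages->sgl; sg; sg = __sg_next(sg))
                npages += sg->length >> PAGE_SHIFT; /* cf. __sg_page_count() */

        return npages;
}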
/**
@@ -2633,20 +2731,20 @@ struct drm_i915_cmd_table {
#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
+#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
+#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
+#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
@@ -2659,7 +2757,7 @@ struct drm_i915_cmd_table {
#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
@@ -2803,7 +2901,7 @@ struct drm_i915_cmd_table {
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
@@ -2820,6 +2918,8 @@ struct drm_i915_cmd_table {
#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@@ -2912,6 +3012,7 @@ extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern void i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -3095,7 +3196,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_load_init(struct drm_device *dev);
+int i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -3127,70 +3228,85 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __sg_page_count(struct scatterlist *sg)
+static inline int __sg_page_count(const struct scatterlist *sg)
{
return sg->length >> PAGE_SHIFT;
}
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
-static inline dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ might_lock(&obj->mm.lock);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
- return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+ return __i915_gem_object_get_pages(obj);
}
-static inline struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
- return NULL;
-
- if (n < obj->get_page.last) {
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
- }
+ GEM_BUG_ON(!obj->mm.pages);
- while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
- obj->get_page.last += __sg_page_count(obj->get_page.sg++);
- if (unlikely(sg_is_chain(obj->get_page.sg)))
- obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
- }
+ atomic_inc(&obj->mm.pages_pin_count);
+}
- return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
}
-static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- GEM_BUG_ON(obj->pages == NULL);
- obj->pages_pin_count++;
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(!obj->mm.pages);
+
+ atomic_dec(&obj->mm.pages_pin_count);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
-static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- GEM_BUG_ON(obj->pages_pin_count == 0);
- obj->pages_pin_count--;
- GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
+ __i915_gem_object_unpin_pages(obj);
}
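A minimal sketch of the intended pin/use/unpin discipline (hypothetical caller; the plain pin may sleep to populate the pages, whereas the __-prefixed variants assume they already exist):

static int example_with_pinned_pages(struct drm_i915_gem_object *obj)
{
        int err;

        err = i915_gem_object_pin_pages(obj); /* may take obj->mm.lock */
        if (err)
                return err;

        /* obj->mm.pages is now populated and cannot be reaped */

        i915_gem_object_unpin_pages(obj);
        return 0;
}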
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
@@ -3206,8 +3322,8 @@ enum i915_map_type {
* the kernel address space. Based on the @type of mapping, the PTE will be
* set to either WriteBack or WriteCombine (via pgprot_t).
*
- * The caller must hold the struct_mutex, and is responsible for calling
- * i915_gem_object_unpin_map() when the mapping is no longer required.
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
*
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
@@ -3223,12 +3339,9 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
- *
- * The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
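A minimal sketch of the map/unmap pairing under the relaxed locking rules (example_clear_object is hypothetical):

static int example_clear_object(struct drm_i915_gem_object *obj)
{
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        memset(vaddr, 0, obj->base.size);

        i915_gem_object_unpin_map(obj); /* no struct_mutex required */
        return 0;
}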
@@ -3261,7 +3374,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
-int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
@@ -3300,9 +3413,10 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int __must_check
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly);
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
@@ -3384,6 +3498,7 @@ int __must_check i915_vma_put_fence(struct i915_vma *vma);
static inline bool
i915_vma_pin_fence(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (vma->fence) {
vma->fence->pin_count++;
return true;
@@ -3402,6 +3517,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (vma->fence) {
GEM_BUG_ON(vma->fence->pin_count <= 0);
vma->fence->pin_count--;
@@ -3411,8 +3527,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_gem_restore_fences(struct drm_device *dev);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3422,6 +3540,9 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3455,6 +3576,16 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
+static inline struct intel_timeline *
+i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm;
+
+ vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ return &vm->timeline.engine[engine->id];
+}
+
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
@@ -3508,6 +3639,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size);
+/* i915_gem_internal.c */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
+ unsigned int size);
+
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
@@ -3690,7 +3826,7 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_init(struct drm_device *dev);
+extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
@@ -3745,6 +3881,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask);
+uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
@@ -3834,11 +3987,30 @@ __raw_write(64, q)
#undef __raw_write
/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections inside IRQ handlers where forcewake is explicitly
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
* controlled.
+ *
* Think twice, and think again, before using these.
- * Note: Should only be used between intel_uncore_forcewake_irqlock() and
- * intel_uncore_forcewake_irqunlock().
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&dev_priv->uncore.lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&dev_priv->uncore.lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
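A concrete instance of the pattern described above (the register argument is illustrative; some registers need no forcewake at all, see intel_uncore_forcewake_for_reg()):

static u32 example_read_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
        u32 val;

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

        val = I915_READ_FW(reg); /* untraced; forcewake held by us */

        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return val;
}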
@@ -3915,7 +4087,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -3966,7 +4138,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
wake_up_process(tsk);
rcu_read_unlock();
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf5e906..41e697e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,6 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -42,6 +41,7 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
@@ -63,13 +63,13 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
}
static int
-insert_mappable_node(struct drm_i915_private *i915,
+insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
- size, 0, 0, 0,
- i915->ggtt.mappable_end,
+ return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
+ size, 0, -1,
+ 0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
}
@@ -104,6 +104,8 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
+ might_sleep();
+
if (!i915_reset_in_progress(error))
return 0;
@@ -114,7 +116,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
!i915_reset_in_progress(error),
- 10*HZ);
+ I915_RESET_TIMEOUT);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
@@ -167,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -177,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
@@ -185,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
- return PTR_ERR(page);
+ return ERR_CAST(page);
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
@@ -200,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
@@ -214,29 +216,31 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size;
- obj->pages = st;
- return 0;
+ return st;
}
static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
{
- int ret;
+ GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
- BUG_ON(obj->madv == __I915_MADV_PURGED);
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.dirty = false;
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ i915_gem_clflush_object(obj, false);
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
- if (obj->dirty) {
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj);
+
+ if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
@@ -255,22 +259,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
kunmap_atomic(dst);
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
vaddr += PAGE_SIZE;
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
}
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
drm_pci_free(obj->base.dev, obj->phys_handle);
+ i915_gem_object_unpin_pages(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -292,7 +297,12 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -311,88 +321,143 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for just read access or read-write access
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly)
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
{
- struct reservation_object *resv;
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct drm_i915_gem_request *rq;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
- if (!readonly) {
- active = obj->last_read;
- active_mask = i915_gem_object_get_active(obj);
- } else {
- active_mask = 1;
- active = &obj->last_write;
- }
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return timeout;
- for_each_active(active_mask, idx) {
- int ret;
+ if (!dma_fence_is_i915(fence))
+ return dma_fence_wait_timeout(fence,
+ flags & I915_WAIT_INTERRUPTIBLE,
+ timeout);
- ret = i915_gem_active_wait(&active[idx],
- &obj->base.dev->struct_mutex);
- if (ret)
- return ret;
+ rq = to_request(fence);
+ if (i915_gem_request_completed(rq))
+ goto out;
+
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (rps) {
+ if (INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+ else
+ rps = NULL;
}
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long err;
+ timeout = i915_wait_request(rq, flags, timeout);
- err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
- MAX_SCHEDULE_TIMEOUT);
- if (err < 0)
- return err;
+out:
+ if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
+ i915_gem_request_retire_upto(rq);
+
+ if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&rq->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&rq->i915->rps.client_lock);
}
- return 0;
+ return timeout;
}
-/* A nonblocking variant of the above wait. Must be called prior to
- * acquiring the mutex for the object, as the object state may change
- * during this call. A reference must be held by the caller for the object.
- */
-static __must_check int
-__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
- struct intel_rps_client *rps,
- bool readonly)
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct dma_fence *excl;
- active_mask = __I915_BO_ACTIVE(obj);
- if (!active_mask)
- return 0;
-
- if (!readonly) {
- active = obj->last_read;
- } else {
- active_mask = 1;
- active = &obj->last_write;
- }
-
- for_each_active(active_mask, idx) {
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
int ret;
- ret = i915_gem_active_wait_unlocked(&active[idx],
- I915_WAIT_INTERRUPTIBLE,
- NULL, rps);
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
if (ret)
return ret;
+
+ for (i = 0; i < count; i++) {
+ timeout = i915_gem_object_wait_fence(shared[i],
+ flags, timeout,
+ rps);
+ if (timeout <= 0)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
}
- return 0;
+ if (excl && timeout > 0)
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+
+ dma_fence_put(excl);
+
+ return timeout;
+}
+
+/**
+ * Waits for rendering to the object to complete
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes, etc.)
+ * @timeout: how long to wait, in jiffies
+ * @rps: client (user process) to charge for any waitboosting
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout,
+ struct intel_rps_client *rps)
+{
+ might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
+ !!(flags & I915_WAIT_LOCKED));
+#endif
+ GEM_BUG_ON(timeout < 0);
+
+ timeout = i915_gem_object_wait_reservation(obj->resv,
+ flags, timeout,
+ rps);
+ return timeout < 0 ? timeout : 0;
}
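As a usage sketch (hypothetical caller), an unlocked, interruptible wait for all outstanding rendering with a finite budget in jiffies; the wrapper returns 0 on success or a negative error code such as -ETIME:

static int example_wait_for_object_idle(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_wait(obj,
                                    I915_WAIT_INTERRUPTIBLE |
                                    I915_WAIT_ALL,
                                    2 * HZ, /* arbitrary 2s budget */
                                    NULL);
}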
static struct intel_rps_client *to_rps_client(struct drm_file *file)
@@ -416,7 +481,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
return 0;
}
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
if (obj->base.filp == NULL)
@@ -426,9 +491,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ if (obj->mm.pages)
+ return -EBUSY;
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -438,23 +503,29 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
- return i915_gem_object_get_pages(obj);
+ return i915_gem_object_pin_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret = 0;
+ int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
return ret;
@@ -516,7 +587,7 @@ i915_gem_create(struct drm_file *file,
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
@@ -548,6 +619,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_create *args = data;
+ i915_gem_flush_free_objects(to_i915(dev));
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -614,21 +687,24 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
{
int ret;
- *needs_clflush = 0;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ *needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu read domain, set ourself into the gtt
@@ -661,20 +737,25 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
{
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
*needs_clflush = 0;
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu write domain, set ourself into the
@@ -704,7 +785,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
obj->cache_dirty = true;
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = 1;
+ obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
@@ -713,32 +794,6 @@ err_unpin:
return ret;
}
-/* Per-page copy function for the shmem pread fastpath.
- * Flushes invalid cachelines before reading the target if
- * needs_clflush is set. */
-static int
-shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled)
@@ -764,7 +819,7 @@ shmem_clflush_swizzled_range(char *addr, unsigned long length,
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
-shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pread_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
@@ -773,60 +828,130 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (needs_clflush)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
+ ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
+ ret = __copy_to_user(user_data, vaddr + offset, length);
kunmap(page);
return ret ? - EFAULT : 0;
}
-static inline unsigned long
-slow_user_access(struct io_mapping *mapping,
- uint64_t page_base, int page_offset,
- char __user *user_data,
- unsigned long length, bool pwrite)
+static int
+shmem_pread(struct page *page, int offset, int length, char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ int ret;
+
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
+
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + offset, length);
+ ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return 0;
+
+ return shmem_pread_slow(page, offset, length, user_data,
+ page_do_bit17_swizzling, needs_clflush);
+}
+
+static int
+i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args)
+{
+ char __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int needs_clflush;
+ unsigned int idx, offset;
+ int ret;
+
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
+
+ ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ remain = args->size;
+ user_data = u64_to_user_ptr(args->data_ptr);
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pread(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ needs_clflush);
+ if (ret)
+ break;
+
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return ret;
+}
+
+static inline bool
+gtt_user_read(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *ioaddr;
void *vaddr;
- uint64_t unwritten;
+ unsigned long unwritten;
- ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)ioaddr + page_offset;
- if (pwrite)
- unwritten = __copy_from_user(vaddr, user_data, length);
- else
- unwritten = __copy_to_user(user_data, vaddr, length);
-
- io_mapping_unmap(ioaddr);
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_to_user(user_data, vaddr + offset, length);
+ io_mapping_unmap(vaddr);
+ }
return unwritten;
}
static int
-i915_gem_gtt_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj, uint64_t size,
- uint64_t data_offset, uint64_t data_ptr)
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_mm_node node;
- char __user *user_data;
- uint64_t remain;
- uint64_t offset;
+ struct i915_vma *vma;
+ void __user *user_data;
+ u64 remain, offset;
int ret;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+
+ intel_runtime_pm_get(i915);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -837,35 +962,21 @@ i915_gem_gtt_pread(struct drm_device *dev,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
goto out_unpin;
- user_data = u64_to_user_ptr(data_ptr);
- remain = size;
- offset = data_offset;
+ mutex_unlock(&i915->drm.struct_mutex);
- mutex_unlock(&dev->struct_mutex);
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_writeable(user_data, remain);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto out_unpin;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = args->offset;
while (remain > 0) {
/* Operation in this page
@@ -882,19 +993,14 @@ i915_gem_gtt_pread(struct drm_device *dev,
wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start,
- I915_CACHE_NONE, 0);
+ node.start, I915_CACHE_NONE, 0);
wmb();
} else {
page_base += offset & PAGE_MASK;
}
- /* This is a slow read/write as it tries to read from
- * and write to user memory which may result into page
- * faults, and so we cannot perform this under struct_mutex.
- */
- if (slow_user_access(&ggtt->mappable, page_base,
- page_offset, user_data,
- page_length, false)) {
+
+ if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
ret = -EFAULT;
break;
}
@@ -904,110 +1010,19 @@ i915_gem_gtt_pread(struct drm_device *dev,
offset += page_length;
}
- mutex_lock(&dev->struct_mutex);
- if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
-
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
node.start, node.size);
- i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
- return ret;
-}
-
-static int
-i915_gem_shmem_pread(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- char __user *user_data;
- ssize_t remain;
- loff_t offset;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int prefaulted = 0;
- int needs_clflush = 0;
- struct sg_page_iter sg_iter;
-
- ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- if (ret)
- return ret;
-
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
-
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
- if (remain <= 0)
- break;
-
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pread_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
- if (ret == 0)
- goto next_page;
-
- mutex_unlock(&dev->struct_mutex);
-
- if (likely(!i915.prefault_disable) && !prefaulted) {
- ret = fault_in_pages_writeable(user_data, remain);
- /* Userspace is tricking us, but we've already clobbered
- * its pages with the prefault and promised to write the
- * data up to the first fault. Hence ignore any errors
- * and just continue. */
- (void)ret;
- prefaulted = 1;
- }
-
- ret = shmem_pread_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- needs_clflush);
-
- mutex_lock(&dev->struct_mutex);
-
- if (ret)
- goto out;
-
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
-out:
- i915_gem_obj_finish_shmem_access(obj);
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1026,7 +1041,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
- int ret = 0;
+ int ret;
if (args->size == 0)
return 0;
@@ -1044,36 +1059,29 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
- goto err;
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err;
+ goto out;
- ret = i915_gem_shmem_pread(dev, obj, args, file);
-
- /* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV) {
- intel_runtime_pm_get(to_i915(dev));
- ret = i915_gem_gtt_pread(dev, obj, args->size,
- args->offset, args->data_ptr);
- intel_runtime_pm_put(to_i915(dev));
- }
+ ret = i915_gem_shmem_pread(obj, args);
+ if (ret == -EFAULT || ret == -ENODEV)
+ ret = i915_gem_gtt_pread(obj, args);
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
return ret;
}
@@ -1081,51 +1089,52 @@ err:
* page faults in the source data
*/
-static inline int
-fast_user_write(struct io_mapping *mapping,
- loff_t page_base, int page_offset,
- char __user *user_data,
- int length)
+static inline bool
+ggtt_write(struct io_mapping *mapping,
+ loff_t base, int offset,
+ char __user *user_data, int length)
{
- void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force*)vaddr_atomic + page_offset;
- unwritten = __copy_from_user_inatomic_nocache(vaddr,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ vaddr = (void __force *)
+ io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_from_user(vaddr + offset, user_data, length);
+ io_mapping_unmap(vaddr);
+ }
+
return unwritten;
}
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
- * @i915: i915 device private data
- * @obj: i915 gem object
+ * @obj: i915 GEM object
* @args: pwrite arguments structure
- * @file: drm file pointer
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_device *dev = obj->base.dev;
- struct i915_vma *vma;
struct drm_mm_node node;
- uint64_t remain, offset;
- char __user *user_data;
+ struct i915_vma *vma;
+ u64 remain, offset;
+ void __user *user_data;
int ret;
- bool hit_slow_path = false;
- if (i915_gem_object_is_tiled(obj))
- return -EFAULT;
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
@@ -1138,25 +1147,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
}
}
if (IS_ERR(vma)) {
- ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- remove_mappable_node(&node);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
+ goto out_unlock;
+ GEM_BUG_ON(!node.allocated);
}
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
+ mutex_unlock(&i915->drm.struct_mutex);
+
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -1169,8 +1172,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
- unsigned page_offset = offset_in_page(offset);
- unsigned page_length = PAGE_SIZE - page_offset;
+ unsigned int page_offset = offset_in_page(offset);
+ unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */
@@ -1187,91 +1190,36 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* If the object is non-shmem backed, we retry with the
* path that handles page faults.
*/
- if (fast_user_write(&ggtt->mappable, page_base,
- page_offset, user_data, page_length)) {
- hit_slow_path = true;
- mutex_unlock(&dev->struct_mutex);
- if (slow_user_access(&ggtt->mappable,
- page_base,
- page_offset, user_data,
- page_length, true)) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto out_flush;
- }
-
- mutex_lock(&dev->struct_mutex);
+ if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ user_data, page_length)) {
+ ret = -EFAULT;
+ break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-
-out_flush:
- if (hit_slow_path) {
- if (ret == 0 &&
- (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
- /* The user has modified the object whilst we tried
- * reading from it, and we now have no idea what domain
- * the pages should be in. As we have just been touching
- * them directly, flush everything back to the GTT
- * domain.
- */
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- }
- }
-
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+
+ mutex_lock(&i915->drm.struct_mutex);
out_unpin:
if (node.allocated) {
wmb();
ggtt->base.clear_range(&ggtt->base,
node.start, node.size);
- i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
i915_vma_unpin(vma);
}
-out:
+out_unlock:
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
-/* Per-page copy function for the shmem pwrite fastpath.
- * Flushes invalid cachelines before writing to the target if
- * needs_clflush_before is set and flushes out any written cachelines after
- * writing if needs_clflush is set. */
static int
-shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
- char __user *user_data,
- bool page_do_bit17_swizzling,
- bool needs_clflush_before,
- bool needs_clflush_after)
-{
- char *vaddr;
- int ret;
-
- if (unlikely(page_do_bit17_swizzling))
- return -EINVAL;
-
- vaddr = kmap_atomic(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
- user_data, page_length);
- if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + shmem_page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- return ret ? -EFAULT : 0;
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
-static int
-shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
@@ -1282,124 +1230,114 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
vaddr = kmap(page);
if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user_swizzled(vaddr, offset, user_data,
+ length);
else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
+ ret = __copy_from_user(vaddr + offset, user_data, length);
if (needs_clflush_after)
- shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
- page_length,
+ shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
kunmap(page);
return ret ? -EFAULT : 0;
}
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush_after is set.
+ */
static int
-i915_gem_shmem_pwrite(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int shmem_page_offset, page_length, ret = 0;
- int obj_do_bit17_swizzling, page_do_bit17_swizzling;
- int hit_slowpath = 0;
- unsigned int needs_clflush;
- struct sg_page_iter sg_iter;
+ int ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- if (ret)
- return ret;
+ ret = -ENODEV;
+ if (!page_do_bit17_swizzling) {
+ char *vaddr = kmap_atomic(page);
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + offset, len);
+ ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + offset, len);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
- offset >> PAGE_SHIFT) {
- struct page *page = sg_page_iter_page(&sg_iter);
- int partial_cacheline_write;
+ kunmap_atomic(vaddr);
+ }
+ if (ret == 0)
+ return ret;
- if (remain <= 0)
- break;
+ return shmem_pwrite_slow(page, offset, len, user_data,
+ page_do_bit17_swizzling,
+ needs_clflush_before,
+ needs_clflush_after);
+}
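
Why the atomic-first split above is safe (a sketch of the assumed kernel semantics, not new driver code):

	/* kmap_atomic() runs with pagefaults disabled, so
	 * __copy_from_user_inatomic() returns non-zero instead of sleeping
	 * when the source page is not resident; shmem_pwrite_slow() then
	 * retries with kmap() + __copy_from_user(), which may fault the
	 * page in. Bit-17 swizzled objects skip the fast path entirely,
	 * as swizzling needs the non-atomic helpers.
	 */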
- /* Operation in this page
- *
- * shmem_page_offset = offset within page in shmem file
- * page_length = bytes to copy for this page
- */
- shmem_page_offset = offset_in_page(offset);
-
- page_length = remain;
- if ((shmem_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - shmem_page_offset;
-
- /* If we don't overwrite a cacheline completely we need to be
- * careful to have up-to-date data by first clflushing. Don't
- * overcomplicate things and flush the entire patch. */
- partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
- ((shmem_page_offset | page_length)
- & (boot_cpu_data.x86_clflush_size - 1));
-
- page_do_bit17_swizzling = obj_do_bit17_swizzling &&
- (page_to_phys(page) & (1 << 17)) != 0;
-
- ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
- if (ret == 0)
- goto next_page;
-
- hit_slowpath = 1;
- mutex_unlock(&dev->struct_mutex);
- ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
- user_data, page_do_bit17_swizzling,
- partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
+static int
+i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ void __user *user_data;
+ u64 remain;
+ unsigned int obj_do_bit17_swizzling;
+ unsigned int partial_cacheline_write;
+ unsigned int needs_clflush;
+ unsigned int offset, idx;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (ret)
- goto out;
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (ret)
+ return ret;
-next_page:
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ obj_do_bit17_swizzling = 0;
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ obj_do_bit17_swizzling = BIT(17);
-out:
- i915_gem_obj_finish_shmem_access(obj);
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+	 * overcomplicate things and flush the entire page.
+ */
+ partial_cacheline_write = 0;
+ if (needs_clflush & CLFLUSH_BEFORE)
+ partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
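
A quick check of the mask trick above, assuming a 64-byte cacheline (mask 63): (offset | length) & 63 is zero only when the write both starts and ends on a cacheline boundary.

	/* offset = 0,  length = 64 -> (0  | 64) & 63 == 0  : no pre-flush
	 * offset = 8,  length = 56 -> (8  | 56) & 63 == 56 : pre-flush
	 * offset = 64, length = 32 -> (64 | 32) & 63 == 32 : pre-flush
	 */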
- if (hit_slowpath) {
- /*
- * Fixup: Flush cpu caches in case we didn't flush the dirty
- * cachelines in-line while writing and the object moved
- * out of the cpu write domain while we've dropped the lock.
- */
- if (!(needs_clflush & CLFLUSH_AFTER) &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- if (i915_gem_clflush_object(obj, obj->pin_display))
- needs_clflush |= CLFLUSH_AFTER;
- }
- }
+ user_data = u64_to_user_ptr(args->data_ptr);
+ remain = args->size;
+ offset = offset_in_page(args->offset);
+ for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+ struct page *page = i915_gem_object_get_page(obj, idx);
+ int length;
+
+ length = remain;
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ ret = shmem_pwrite(page, offset, length, user_data,
+ page_to_phys(page) & obj_do_bit17_swizzling,
+ (offset | length) & partial_cacheline_write,
+ needs_clflush & CLFLUSH_AFTER);
+ if (ret)
+ break;
- if (needs_clflush & CLFLUSH_AFTER)
- i915_gem_chipset_flush(to_i915(dev));
+ remain -= length;
+ user_data += length;
+ offset = 0;
+ }
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
}
@@ -1415,7 +1353,6 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1428,13 +1365,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- if (likely(!i915.prefault_disable)) {
- ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
- }
-
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -1448,15 +1378,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
goto err;
- intel_runtime_pm_get(dev_priv);
-
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err_rpm;
+ goto err;
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -1466,30 +1398,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+ cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
- * textures). Fallback to the shmem path in that case. */
- }
+	 * textures). Fall back to the shmem path in that case.
+ */
+ ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
- return ret;
-
-err_rpm:
- intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -1500,6 +1425,30 @@ write_origin(struct drm_i915_gem_object *obj, unsigned domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915;
+ struct list_head *list;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ continue;
+
+ if (i915_vma_is_active(vma))
+ continue;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ }
+
+ i915 = to_i915(obj->base.dev);
+ list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
+ list_move_tail(&obj->global_link, list);
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -1515,7 +1464,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
- int ret;
+ int err;
/* Only handle setting domains to types used by the CPU. */
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
@@ -1535,29 +1484,48 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
- if (ret)
- goto err;
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
+ if (err)
+ goto out;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err;
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out_unpin;
if (read_domains & I915_GEM_DOMAIN_GTT)
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
else
- ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+ /* And bump the LRU for this access */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ mutex_unlock(&dev->struct_mutex);
if (write_domain != 0)
intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-
-err:
- i915_gem_object_put_unlocked(obj);
- return ret;
+ return err;
}
/**
@@ -1587,7 +1555,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return err;
}
@@ -1633,7 +1601,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* pages from.
*/
if (!obj->base.filp) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
@@ -1645,7 +1613,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINTR;
}
vma = find_vma(mm, addr);
@@ -1659,7 +1627,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1771,7 +1739,14 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
- ret = __unsafe_wait_rendering(obj, NULL, !write);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
@@ -1804,7 +1779,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Use a partial view if it is bigger than available space */
chunk_size = MIN_CHUNK_PAGES;
if (i915_gem_object_is_tiled(obj))
- chunk_size = max(chunk_size, tile_row_pages(obj));
+ chunk_size = roundup(chunk_size, tile_row_pages(obj));
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
@@ -1839,22 +1814,25 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
if (ret)
goto err_unpin;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (list_empty(&obj->userfault_link))
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
- if (ret)
- goto err_unpin;
- obj->fault_mappable = true;
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(dev_priv);
+ i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
case -EIO:
@@ -1916,15 +1894,23 @@ err:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
+ *
+	 * Note that RPM complicates matters somewhat by adding an additional
+ * requirement that operations to the GGTT be made holding the RPM
+ * wakeref.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
- if (!obj->fault_mappable)
- return;
+ if (list_empty(&obj->userfault_link))
+ goto out;
+ list_del_init(&obj->userfault_link);
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
@@ -1937,16 +1923,45 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
*/
wmb();
- obj->fault_mappable = false;
+out:
+ intel_runtime_pm_put(i915);
}
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj, *on;
+ int i;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- i915_gem_release_mmap(obj);
+ /*
+ * Only called during RPM suspend. All users of the userfault_list
+ * must be holding an RPM wakeref to ensure that this can not
+ * run concurrently with themselves (and use the struct_mutex for
+ * protection between themselves).
+ */
+
+ list_for_each_entry_safe(obj, on,
+ &dev_priv->mm.userfault_list, userfault_link) {
+ list_del_init(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ }
+
+	/* The fence registers will be lost when the device powers down.
+	 * If any were in use by hardware (i.e. they are pinned), we should
+	 * not be powering down! All other fences will be reacquired by the
+	 * user upon waking.
+ */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ if (WARN_ON(reg->pin_count))
+ continue;
+
+ if (!reg->vma)
+ continue;
+
+ GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ reg->dirty = true;
+ }
}
/**
@@ -2060,7 +2075,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
@@ -2103,16 +2118,18 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->madv = __I915_MADV_PURGED;
+ obj->mm.madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
- switch (obj->madv) {
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(obj->mm.pages);
+
+ switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
@@ -2127,82 +2144,83 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
- int ret;
- BUG_ON(obj->madv == __I915_MADV_PURGED);
+ __i915_gem_object_release_shmem(obj);
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj, true);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
-
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj);
-
- if (obj->madv == I915_MADV_DONTNEED)
- obj->dirty = 0;
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
- if (obj->madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
-int
-i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
- const struct drm_i915_gem_object_ops *ops = obj->ops;
+ struct radix_tree_iter iter;
+ void **slot;
- if (obj->pages == NULL)
- return 0;
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
- if (obj->pages_pin_count)
- return -EBUSY;
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
GEM_BUG_ON(obj->bind_count);
+ if (!READ_ONCE(obj->mm.pages))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
- list_del(&obj->global_list);
+ pages = fetch_and_zero(&obj->mm.pages);
+ GEM_BUG_ON(!pages);
- if (obj->mapping) {
+ if (obj->mm.mapping) {
void *ptr;
- ptr = ptr_mask_bits(obj->mapping);
+ ptr = ptr_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
- obj->mapping = NULL;
+ obj->mm.mapping = NULL;
}
- ops->put_pages(obj);
- obj->pages = NULL;
+ __i915_gem_object_reset_page_iter(obj);
- i915_gem_object_invalidate(obj);
-
- return 0;
+ obj->ops->put_pages(obj, pages);
+unlock:
+ mutex_unlock(&obj->mm.lock);
}
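
fetch_and_zero() above is the i915 helper from i915_drv.h; roughly the following (reproduced from memory, treat as a sketch):

	/* Read a value and zero its location in one expression, so the
	 * pages pointer is detached from the object under obj->mm.lock
	 * before anyone else can observe a half-torn-down page set.
	 */
	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})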
static unsigned int swiotlb_max_size(void)
@@ -2214,7 +2232,7 @@ static unsigned int swiotlb_max_size(void)
#endif
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2233,8 +2251,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
max_segment = swiotlb_max_size();
if (!max_segment)
@@ -2242,12 +2260,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/* Get the list of pages out of our struct file. They'll be pinned
@@ -2298,20 +2316,15 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
}
if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- obj->pages = st;
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret)
goto err_pages;
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
-
- if (i915_gem_object_is_tiled(obj) &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- i915_gem_object_pin_pages(obj);
+ i915_gem_object_do_bit_17_swizzle(obj, st);
- return 0;
+ return st;
err_pages:
sg_mark_end(sg);
@@ -2331,43 +2344,73 @@ err_pages:
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ret;
+ return ERR_PTR(ret);
}
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_get_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_put_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int
-i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const struct drm_i915_gem_object_ops *ops = obj->ops;
- int ret;
+ lockdep_assert_held(&obj->mm.lock);
- if (obj->pages)
- return 0;
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
+
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+}
- if (obj->madv != I915_MADV_WILLNEED) {
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- BUG_ON(obj->pages_pin_count);
+ pages = obj->ops->get_pages(obj);
+ if (unlikely(IS_ERR(pages)))
+ return PTR_ERR(pages);
- ret = ops->get_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_set_pages(obj, pages);
+ return 0;
+}
- list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
- return 0;
+ if (unlikely(!obj->mm.pages)) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
}
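
The intended calling convention, as a sketch (i915_gem_object_pin_pages()/unpin_pages() are the thin wrappers this series adds around the function above); note the smp_mb__before_atomic() publishes obj->mm.pages before pages_pin_count becomes non-zero, pairing with the lock-free atomic_inc_not_zero() fast path:

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;
	/* obj->mm.pages is guaranteed to stay resident until unpinned */
	i915_gem_object_unpin_pages(obj);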
/* The 'mapping' part of i915_gem_object_pin_map() below */
@@ -2375,7 +2418,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->pages;
+ struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
@@ -2426,21 +2469,31 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
- ret = i915_gem_object_get_pages(obj);
+ ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
return ERR_PTR(ret);
- i915_gem_object_pin_pages(obj);
- pinned = obj->pages_pin_count > 1;
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!obj->mm.pages)) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!obj->mm.pages);
- ptr = ptr_unpack_bits(obj->mapping, has_type);
+ ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if (is_vmalloc_addr(ptr))
@@ -2448,59 +2501,28 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
else
kunmap(kmap_to_page(ptr));
- ptr = obj->mapping = NULL;
+ ptr = obj->mm.mapping = NULL;
}
if (!ptr) {
ptr = i915_gem_object_map(obj, type);
if (!ptr) {
ret = -ENOMEM;
- goto err;
+ goto err_unpin;
}
- obj->mapping = ptr_pack_bits(ptr, type);
+ obj->mm.mapping = ptr_pack_bits(ptr, type);
}
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
return ptr;
-err:
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(ret);
-}
-
-static void
-i915_gem_object_retire__write(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_write);
-
- intel_fb_obj_flush(obj, true, ORIGIN_CS);
-}
-
-static void
-i915_gem_object_retire__read(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
-{
- int idx = request->engine->id;
- struct drm_i915_gem_object *obj =
- container_of(active, struct drm_i915_gem_object, last_read[idx]);
-
- GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
-
- i915_gem_object_clear_active(obj, idx);
- if (i915_gem_object_is_active(obj))
- return;
-
- /* Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (obj->bind_count)
- list_move_tail(&obj->global_list,
- &request->i915->mm.bound_list);
-
- i915_gem_object_put(obj);
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(ret);
+ goto out_unlock;
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2547,13 +2569,10 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, link) {
- if (i915_gem_request_completed(request))
+ list_for_each_entry(request, &engine->timeline->requests, link) {
+ if (__i915_gem_request_completed(request))
continue;
- if (!i915_sw_fence_done(&request->submit))
- break;
-
return request;
}
@@ -2581,6 +2600,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
+ struct intel_timeline *timeline;
bool ring_hung;
if (engine->irq_seqno_barrier)
@@ -2599,7 +2619,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
return;
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->fence.seqno);
+ engine->name, request->global_seqno);
/* Setup the CS to resume from the breadcrumb of the hung request */
engine->reset_hw(engine, request);
@@ -2616,9 +2636,13 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
- list_for_each_entry_continue(request, &engine->request_list, link)
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
+
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+ list_for_each_entry(request, &timeline->requests, link)
+ reset_request(request);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
@@ -2626,6 +2650,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
i915_gem_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id)
@@ -2653,7 +2679,8 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
- intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
/*
* Clear the execlists queue up before freeing the requests, as those
@@ -2669,8 +2696,6 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
spin_unlock(&engine->execlist_lock);
}
-
- engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
@@ -2727,7 +2752,14 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_engines))
+ /*
+	 * Wait for the last execlists context to complete, but bail out
+	 * early if a new request is submitted in the meantime.
+ */
+ wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
+ intel_execlists_idle(dev_priv), 10);
+
+ if (READ_ONCE(dev_priv->gt.active_requests))
return;
rearm_hangcheck =
@@ -2741,9 +2773,19 @@ i915_gem_idle_work_handler(struct work_struct *work)
goto out_rearm;
}
- if (dev_priv->gt.active_engines)
+ /*
+	 * A new request was retired while this work handler was running, so
+	 * extend the active period until the next instance of the work.
+ */
+ if (work_pending(work))
goto out_unlock;
+ if (dev_priv->gt.active_requests)
+ goto out_unlock;
+
+ if (wait_for(intel_execlists_idle(dev_priv), 10))
+ DRM_ERROR("Timeout waiting for engines to idle\n");
+
for_each_engine(engine, dev_priv, id)
i915_gem_batch_pool_fini(&engine->batch_pool);
@@ -2774,9 +2816,26 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
+
+ if (i915_gem_object_is_active(obj) &&
+ !i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_set_active_reference(obj);
+ i915_gem_object_get(obj);
+ }
mutex_unlock(&obj->base.dev->struct_mutex);
}
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+ if (timeout_ns < 0)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ if (timeout_ns == 0)
+ return 0;
+
+ return nsecs_to_jiffies_timeout(timeout_ns);
+}
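
The resulting mapping, per the wait-ioctl uAPI (nsecs_to_jiffies_timeout() rounds up, so the wait is never shorter than requested):

	/* timeout_ns  < 0 -> MAX_SCHEDULE_TIMEOUT : wait indefinitely
	 * timeout_ns == 0 -> 0                    : report state, never block
	 * timeout_ns  > 0 -> jiffies, rounded up
	 */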
+
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @dev: drm device pointer
@@ -2805,10 +2864,9 @@ int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
- struct intel_rps_client *rps = to_rps_client(file);
struct drm_i915_gem_object *obj;
- unsigned long active;
- int idx, ret = 0;
+ ktime_t start;
+ long ret;
if (args->flags != 0)
return -EINVAL;
@@ -2817,17 +2875,20 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (!obj)
return -ENOENT;
- active = __I915_BO_ACTIVE(obj);
- for_each_active(active, idx) {
- s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
- ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
- I915_WAIT_INTERRUPTIBLE,
- timeout, rps);
- if (ret)
- break;
+ start = ktime_get();
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ to_wait_timeout(args->timeout_ns),
+ to_rps_client(file));
+
+ if (args->timeout_ns > 0) {
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
}
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return ret;
}
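
A userspace model of the remaining-timeout bookkeeping above (ktime_get()/ktime_sub() are the in-kernel equivalents; sketch only, applied just as the ioctl does when the caller passed a positive timeout):

	#include <time.h>

	static long long remaining_ns(long long timeout_ns,
				      const struct timespec *start,
				      const struct timespec *end)
	{
		long long elapsed =
			(end->tv_sec - start->tv_sec) * 1000000000LL +
			(end->tv_nsec - start->tv_nsec);

		timeout_ns -= elapsed;
		return timeout_ns < 0 ? 0 : timeout_ns; /* clamp at zero */
	}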
@@ -2848,6 +2909,8 @@ int i915_vma_unbind(struct i915_vma *vma)
unsigned long active;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
/* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
@@ -2859,6 +2922,13 @@ int i915_vma_unbind(struct i915_vma *vma)
* In order to prevent it from being recursively closed,
* take a pin on the vma so that the second unbind is
* aborted.
+ *
+ * Even more scary is that the retire callback may free
+ * the object (last active vma). To prevent the explosion
+ * we defer the actual object free to a worker that can
+ * only proceed once it acquires the struct_mutex (which
+ * we currently hold, therefore it cannot free this object
+ * before we are finished).
*/
__i915_vma_pin(vma);
@@ -2883,7 +2953,7 @@ int i915_vma_unbind(struct i915_vma *vma)
goto destroy;
GEM_BUG_ON(obj->bind_count == 0);
- GEM_BUG_ON(!obj->pages);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (i915_vma_is_map_and_fenceable(vma)) {
/* release the fence reg _after_ flushing */
@@ -2907,7 +2977,7 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- if (vma->pages != obj->pages) {
+ if (vma->pages != obj->mm.pages) {
GEM_BUG_ON(!vma->pages);
sg_free_table(vma->pages);
kfree(vma->pages);
@@ -2917,7 +2987,7 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (--obj->bind_count == 0)
- list_move_tail(&obj->global_list,
+ list_move_tail(&obj->global_link,
&to_i915(obj->base.dev)->mm.unbound_list);
/* And finally now the object is completely decoupled from this vma,
@@ -2933,18 +3003,26 @@ destroy:
return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags)
+static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int ret;
+ int ret, i;
- for_each_engine(engine, dev_priv, id) {
- if (engine->last_context == NULL)
- continue;
+ for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+ ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
- ret = intel_engine_idle(engine, flags);
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+{
+ struct i915_gem_timeline *tl;
+ int ret;
+
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ ret = wait_for_timeline(tl, flags);
if (ret)
return ret;
}
@@ -3040,12 +3118,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return -E2BIG;
}
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (offset & (alignment - 1) || offset > end - size) {
@@ -3108,9 +3184,10 @@ search_free:
}
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
- list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
obj->bind_count++;
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0;
@@ -3127,7 +3204,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj->pages == NULL)
+ if (!obj->mm.pages)
return false;
/*
@@ -3151,7 +3228,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
}
trace_i915_gem_object_clflush(obj);
- drm_clflush_sg(obj->pages);
+ drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
return true;
@@ -3211,24 +3288,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
I915_GEM_DOMAIN_CPU);
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- continue;
-
- if (i915_vma_is_active(vma))
- continue;
-
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- }
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
@@ -3243,7 +3302,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3258,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
@@ -3277,21 +3343,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
- obj->dirty = 1;
+ obj->mm.dirty = true;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
+ i915_gem_object_unpin_pages(obj);
return 0;
}
@@ -3316,6 +3380,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret = 0;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
if (obj->cache_level == cache_level)
goto out;
@@ -3360,7 +3426,12 @@ restart:
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3428,10 +3499,14 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
+ int err = 0;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto out;
+ }
switch (obj->cache_level) {
case I915_CACHE_LLC:
@@ -3447,15 +3522,15 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
args->caching = I915_CACHING_NONE;
break;
}
-
- i915_gem_object_put_unlocked(obj);
- return 0;
+out:
+ rcu_read_unlock();
+ return err;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
@@ -3472,23 +3547,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
return -ENODEV;
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
- level = HAS_WT(dev_priv) ? I915_CACHE_WT : I915_CACHE_NONE;
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto rpm_put;
+ return ret;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj) {
@@ -3497,13 +3570,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
}
ret = i915_gem_object_set_cache_level(obj, level);
-
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
-rpm_put:
- intel_runtime_pm_put(dev_priv);
-
return ret;
}
@@ -3521,6 +3590,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
@@ -3554,8 +3625,22 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL)
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
PIN_MAPPABLE | PIN_NONBLOCK);
- if (IS_ERR(vma))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+ if (IS_ERR(vma)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int flags;
+
+ /* Valleyview is definitely limited to scanning out the first
+ * 512MiB. Lets presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ flags = 0;
+ if (HAS_GMCH_DISPLAY(i915))
+ flags = PIN_MAPPABLE;
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+ }
if (IS_ERR(vma))
goto err_unpin_display;
@@ -3586,6 +3671,8 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+
if (WARN_ON(vma->obj->pin_display == 0))
return;
@@ -3613,7 +3700,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_wait_rendering(obj, !write);
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
if (ret)
return ret;
@@ -3635,7 +3729,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -3669,11 +3763,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
+ long ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -3700,10 +3790,12 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+ ret = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(target);
- return ret;
+ return ret < 0 ? ret : 0;
}
static bool
@@ -3770,6 +3862,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
unsigned int bound = vma->flags;
int ret;
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
@@ -3811,6 +3904,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
if (IS_ERR(vma))
return vma;
@@ -3901,83 +3996,42 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
}
static __always_inline unsigned int
-__busy_set_if_active(const struct i915_gem_active *active,
+__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *request;
+ struct drm_i915_gem_request *rq;
- request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
- return 0;
-
- /* This is racy. See __i915_gem_active_get_rcu() for an in detail
- * discussion of how to handle the race correctly, but for reporting
- * the busy state we err on the side of potentially reporting the
- * wrong engine as being busy (but we guarantee that the result
- * is at least self-consistent).
- *
- * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
- * whilst we are inspecting it, even under the RCU read lock as we are.
- * This means that there is a small window for the engine and/or the
- * seqno to have been overwritten. The seqno will always be in the
- * future compared to the intended, and so we know that if that
- * seqno is idle (on whatever engine) our request is idle and the
- * return 0 above is correct.
+ /* We have to check the current hw status of the fence as the uABI
+ * guarantees forward progress. We could rely on the idle worker
+ * to eventually flush us, but to minimise latency just ask the
+ * hardware.
*
- * The issue is that if the engine is switched, it is just as likely
- * to report that it is busy (but since the switch happened, we know
- * the request should be idle). So there is a small chance that a busy
- * result is actually the wrong engine.
- *
- * So why don't we care?
- *
- * For starters, the busy ioctl is a heuristic that is by definition
- * racy. Even with perfect serialisation in the driver, the hardware
- * state is constantly advancing - the state we report to the user
- * is stale.
- *
- * The critical information for the busy-ioctl is whether the object
- * is idle as userspace relies on that to detect whether its next
- * access will stall, or if it has missed submitting commands to
- * the hardware allowing the GPU to stall. We never generate a
- * false-positive for idleness, thus busy-ioctl is reliable at the
- * most fundamental level, and we maintain the guarantee that a
- * busy object left to itself will eventually become idle (and stay
- * idle!).
- *
- * We allow ourselves the leeway of potentially misreporting the busy
- * state because that is an optimisation heuristic that is constantly
- * in flux. Being quickly able to detect the busy/idle state is much
- * more important than accurate logging of exactly which engines were
- * busy.
- *
- * For accuracy in reporting the engine, we could use
- *
- * result = 0;
- * request = __i915_gem_active_get_rcu(active);
- * if (request) {
- * if (!i915_gem_request_completed(request))
- * result = flag(request->engine->exec_id);
- * i915_gem_request_put(request);
- * }
- *
- * but that still remains susceptible to both hardware and userspace
- * races. So we accept making the result of that race slightly worse,
- * given the rarity of the race and its low impact on the result.
+ * Note we only report on the status of native fences.
*/
- return flag(READ_ONCE(request->engine->exec_id));
+ if (!dma_fence_is_i915(fence))
+ return 0;
+
+ /* opencode to_request() in order to avoid const warnings */
+ rq = container_of(fence, struct drm_i915_gem_request, fence);
+ if (i915_gem_request_completed(rq))
+ return 0;
+
+ return flag(rq->engine->exec_id);
}
static __always_inline unsigned int
-busy_check_reader(const struct i915_gem_active *active)
+busy_check_reader(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_read_flag);
+ return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
-busy_check_writer(const struct i915_gem_active *active)
+busy_check_writer(const struct dma_fence *fence)
{
- return __busy_set_if_active(active, __busy_write_id);
+ if (!fence)
+ return 0;
+
+ return __busy_set_if_active(fence, __busy_write_id);
}
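
Sketch of the uABI encoding assumed by these helpers (per __busy_read_flag()/__busy_write_id() earlier in this file):

	/* Each reading engine sets one bit in the upper half of args->busy,
	 * and the last writer is reported both as writer and reader:
	 *
	 *	read flag : 0x10000 << engine->exec_id
	 *	write id  : engine->exec_id | (0x10000 << engine->exec_id)
	 */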
int
@@ -3986,64 +4040,58 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- unsigned long active;
+ struct reservation_object_list *list;
+ unsigned int seq;
+ int err;
- obj = i915_gem_object_lookup(file, args->handle);
+ err = -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
if (!obj)
- return -ENOENT;
+ goto out;
- args->busy = 0;
- active = __I915_BO_ACTIVE(obj);
- if (active) {
- int idx;
+ /* A discrepancy here is that we do not report the status of
+ * non-i915 fences, i.e. even though we may report the object as idle,
+ * a call to set-domain may still stall waiting for foreign rendering.
+ * This also means that wait-ioctl may report an object as busy,
+ * where busy-ioctl considers it idle.
+ *
+ * We trade the ability to warn of foreign fences to report on which
+ * i915 engines are active for the object.
+ *
+ * Alternatively, we can trade that extra information on read/write
+ * activity with
+ * args->busy =
+ * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * to report the overall busyness. This is what the wait-ioctl does.
+	 */
+retry:
+ seq = raw_read_seqcount(&obj->resv->seq);
- /* Yes, the lookups are intentionally racy.
- *
- * First, we cannot simply rely on __I915_BO_ACTIVE. We have
- * to regard the value as stale and as our ABI guarantees
- * forward progress, we confirm the status of each active
- * request with the hardware.
- *
- * Even though we guard the pointer lookup by RCU, that only
- * guarantees that the pointer and its contents remain
- * dereferencable and does *not* mean that the request we
- * have is the same as the one being tracked by the object.
- *
- * Consider that we lookup the request just as it is being
- * retired and freed. We take a local copy of the pointer,
- * but before we add its engine into the busy set, the other
- * thread reallocates it and assigns it to a task on another
- * engine with a fresh and incomplete seqno. Guarding against
- * that requires careful serialisation and reference counting,
- * i.e. using __i915_gem_active_get_request_rcu(). We don't,
- * instead we expect that if the result is busy, which engines
- * are busy is not completely reliable - we only guarantee
- * that the object was busy.
- */
- rcu_read_lock();
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
- for_each_active(active, idx)
- args->busy |= busy_check_reader(&obj->last_read[idx]);
+ /* Translate shared fences to READ set of engines */
+ list = rcu_dereference(obj->resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
- /* For ABI sanity, we only care that the write engine is in
- * the set of read engines. This should be ensured by the
- * ordering of setting last_read/last_write in
- * i915_vma_move_to_active(), and then in reverse in retire.
- * However, for good measure, we always report the last_write
- * request as a busy read as well as being a busy write.
- *
- * We don't care that the set of active read/write engines
- * may change during construction of the result, as it is
- * equally liable to change before userspace can inspect
- * the result.
- */
- args->busy |= busy_check_writer(&obj->last_write);
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
- rcu_read_unlock();
+ args->busy |= busy_check_reader(fence);
+ }
}
- i915_gem_object_put_unlocked(obj);
- return 0;
+ if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+ goto retry;
+
+ err = 0;
+out:
+ rcu_read_unlock();
+ return err;
}
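
A minimal userspace model of the seqcount retry loop used above (the kernel's raw_read_seqcount()/read_seqcount_retry() are assumed, not faithfully reimplemented):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct seqcount { _Atomic unsigned int seq; };

	/* Writers bump seq to odd before and even after an update; readers
	 * spin past an in-flight writer and retry if the count moved.
	 */
	static unsigned int read_begin(struct seqcount *s)
	{
		unsigned int seq;

		do {
			seq = atomic_load_explicit(&s->seq,
						   memory_order_acquire);
		} while (seq & 1);
		return seq;
	}

	static bool read_retry(struct seqcount *s, unsigned int seq)
	{
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&s->seq,
					    memory_order_relaxed) != seq;
	}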
int
@@ -4060,7 +4108,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
- int ret;
+ int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4070,65 +4118,72 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
obj = i915_gem_object_lookup(file_priv, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (!obj)
+ return -ENOENT;
- if (obj->pages &&
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ goto out;
+
+ if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (obj->madv == I915_MADV_WILLNEED)
- i915_gem_object_unpin_pages(obj);
- if (args->madv == I915_MADV_WILLNEED)
- i915_gem_object_pin_pages(obj);
+ if (obj->mm.madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (args->madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
- if (obj->madv != __I915_MADV_PURGED)
- obj->madv = args->madv;
+ if (obj->mm.madv != __I915_MADV_PURGED)
+ obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
+ if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
i915_gem_object_truncate(obj);
- args->retained = obj->madv != __I915_MADV_PURGED;
+ args->retained = obj->mm.madv != __I915_MADV_PURGED;
+ mutex_unlock(&obj->mm.lock);
+out:
i915_gem_object_put(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ return err;
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
- int i;
+ mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_list);
- for (i = 0; i < I915_NUM_ENGINES; i++)
- init_request_active(&obj->last_read[i],
- i915_gem_object_retire__read);
- init_request_active(&obj->last_write,
- i915_gem_object_retire__write);
+ INIT_LIST_HEAD(&obj->global_link);
+ INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
+ reservation_object_init(&obj->__builtin_resv);
+ obj->resv = &obj->__builtin_resv;
+
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- obj->madv = I915_MADV_WILLNEED;
+
+ obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_page.lock);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
@@ -4140,6 +4195,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_device *dev, u64 size)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
@@ -4165,7 +4221,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@@ -4202,7 +4258,6 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
fail:
i915_gem_object_free(obj);
-
return ERR_PTR(ret);
}
@@ -4214,7 +4269,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* back the contents from the GPU.
*/
- if (obj->madv != I915_MADV_WILLNEED)
+ if (obj->mm.madv != I915_MADV_WILLNEED)
return false;
if (obj->base.filp == NULL)
@@ -4230,16 +4285,72 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
return atomic_long_read(&obj->base.filp->f_count) == 1;
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_vma *vma, *next;
+ struct drm_i915_gem_object *obj, *on;
- intel_runtime_pm_get(dev_priv);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
+ llist_for_each_entry(obj, freed, freed) {
+ struct i915_vma *vma, *vn;
- trace_i915_gem_object_destroy(obj);
+ trace_i915_gem_object_destroy(obj);
+
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ list_for_each_entry_safe(vma, vn,
+ &obj->vma_list, obj_link) {
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_close(vma);
+ }
+ GEM_BUG_ON(!list_empty(&obj->vma_list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+
+ list_del(&obj->global_link);
+ }
+ intel_runtime_pm_put(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
+ if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ GEM_BUG_ON(obj->mm.pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
+ reservation_object_fini(&obj->__builtin_resv);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(i915, obj->base.size);
+
+ kfree(obj->bit_17);
+ i915_gem_object_free(obj);
+ }
+}
+
+static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+ struct llist_node *freed;
+
+ freed = llist_del_all(&i915->mm.free_list);
+ if (unlikely(freed))
+ __i915_gem_free_objects(i915, freed);
+}
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, struct drm_i915_private, mm.free_work);
+ struct llist_node *freed;
/* All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
@@ -4248,47 +4359,62 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* the GTT either for the user or for scanout). Those VMA still need to
* be unbound now.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
- GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_close(vma);
- }
- GEM_BUG_ON(obj->bind_count);
- /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
- * before progressing. */
- if (obj->stolen)
- i915_gem_object_unpin_pages(obj);
+ while ((freed = llist_del_all(&i915->mm.free_list)))
+ __i915_gem_free_objects(i915, freed);
+}
+
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ /* We can't simply use call_rcu() from i915_gem_free_object()
+ * as we need to block whilst unbinding, and the call_rcu
+ * task may be called from softirq context. So we take a
+ * detour through a worker.
+ */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ schedule_work(&i915->mm.free_work);
+}
- WARN_ON(atomic_read(&obj->frontbuffer_bits));
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
- i915_gem_object_is_tiled(obj))
- i915_gem_object_unpin_pages(obj);
+ if (obj->mm.quirked)
+ __i915_gem_object_unpin_pages(obj);
- if (WARN_ON(obj->pages_pin_count))
- obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
- obj->madv = I915_MADV_DONTNEED;
- i915_gem_object_put_pages(obj);
-
- BUG_ON(obj->pages);
+ obj->mm.madv = I915_MADV_DONTNEED;
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
+ /* Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
- if (obj->ops->release)
- obj->ops->release(obj);
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev_priv, obj->base.size);
+ GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+ if (i915_gem_object_is_active(obj))
+ i915_gem_object_set_active_reference(obj);
+ else
+ i915_gem_object_put(obj);
+}
- kfree(obj->bit_17);
- i915_gem_object_free(obj);
+static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- intel_runtime_pm_put(dev_priv);
+ for_each_engine(engine, dev_priv, id)
+ GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}
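The free path introduced above is a self-contained kernel pattern worth isolating: i915_gem_free_object() only defers via call_rcu(), the RCU callback (softirq context, must not sleep) pushes the object onto a lockless llist, and a worker reaps the whole batch where it is safe to take struct_mutex and block while unbinding. A minimal sketch of that pattern follows; dummy_obj, free_list, free_work and the function names are illustrative, not driver symbols.

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct dummy_obj {
	struct rcu_head rcu;
	struct llist_node freed;
};

static void free_worker(struct work_struct *work);
static LLIST_HEAD(free_list);
static DECLARE_WORK(free_work, free_worker);

static void free_worker(struct work_struct *work)
{
	struct llist_node *freed;
	struct dummy_obj *obj, *next;

	/* Worker context: blocking, locking and unbinding are allowed. */
	while ((freed = llist_del_all(&free_list)))
		llist_for_each_entry_safe(obj, next, freed, freed)
			kfree(obj);
}

static void free_rcu(struct rcu_head *head)
{
	struct dummy_obj *obj = container_of(head, struct dummy_obj, rcu);

	/* llist_add() returns true only when the list was empty, so the
	 * worker is kicked once per batch rather than once per object.
	 */
	if (llist_add(&obj->freed, &free_list))
		schedule_work(&free_work);
}

static void dummy_obj_free(struct dummy_obj *obj)
{
	/* Wait out any RCU read-side lookups still inspecting the
	 * object, cf. i915_gem_object_lookup_rcu() mentioned above.
	 */
	call_rcu(&obj->rcu, free_rcu);
}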
int i915_gem_suspend(struct drm_device *dev)
@@ -4319,18 +4445,22 @@ int i915_gem_suspend(struct drm_device *dev)
goto err;
i915_gem_retire_requests(dev_priv);
+ GEM_BUG_ON(dev_priv->gt.active_requests);
+ assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
flush_delayed_work(&dev_priv->gt.idle_work);
+ flush_work(&dev_priv->mm.free_work);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
+ WARN_ON(!intel_execlists_idle(dev_priv));
/*
* Neither the BIOS, ourselves nor any other kernel
@@ -4367,6 +4497,8 @@ void i915_gem_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(dev_priv->gt.awake);
+
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
@@ -4437,6 +4569,8 @@ i915_gem_init_hw(struct drm_device *dev)
enum intel_engine_id id;
int ret;
+ dev_priv->gt.last_init_time = ktime_get();
+
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4615,33 +4749,43 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
i915_gem_detect_bit_6_swizzle(dev);
}
-void
+int
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ int err = -ENOMEM;
+
+ dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->objects)
+ goto err_out;
+
+ dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->vmas)
+ goto err_objects;
- dev_priv->objects =
- kmem_cache_create("i915_gem_object",
- sizeof(struct drm_i915_gem_object), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->vmas =
- kmem_cache_create("i915_gem_vma",
- sizeof(struct i915_vma), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- dev_priv->requests =
- kmem_cache_create("i915_gem_request",
- sizeof(struct drm_i915_gem_request), 0,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU,
- NULL);
+ dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_DESTROY_BY_RCU);
+ if (!dev_priv->requests)
+ goto err_vmas;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ INIT_LIST_HEAD(&dev_priv->gt.timelines);
+ err = i915_gem_timeline_init(dev_priv,
+ &dev_priv->gt.global_timeline,
+ "[execution]");
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ goto err_requests;
INIT_LIST_HEAD(&dev_priv->context_list);
+ INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+ init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4658,12 +4802,25 @@ i915_gem_load_init(struct drm_device *dev)
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
+
+ return 0;
+
+err_requests:
+ kmem_cache_destroy(dev_priv->requests);
+err_vmas:
+ kmem_cache_destroy(dev_priv->vmas);
+err_objects:
+ kmem_cache_destroy(dev_priv->objects);
+err_out:
+ return err;
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -4712,7 +4869,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_list) {
+ list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -4804,21 +4961,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
-{
- struct page *page;
-
- /* Only default objects have per-page dirty tracking */
- if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
- return NULL;
-
- page = i915_gem_object_get_page(obj, n);
- set_page_dirty(page);
- return page;
-}
-
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
@@ -4837,14 +4979,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
if (ret)
goto fail;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto fail;
- i915_gem_object_pin_pages(obj);
- sg = obj->pages;
+ sg = obj->mm.pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
- obj->dirty = 1; /* Backing store is now out of date */
+ obj->mm.dirty = true; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) {
@@ -4859,3 +5000,156 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(ret);
}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookups of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ unsigned long exception, i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ exception =
+ RADIX_TREE_EXCEPTIONAL_ENTRY |
+ idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i,
+ (void *)exception);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radixtree will contain an exceptional entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(radix_tree_exception(sg))) {
+ unsigned long base =
+ (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
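i915_gem_object_get_sg() pairs two structures: a cursor (sg_pos/sg_idx) that makes sequential forward lookups amortized O(1), and a radix tree for random backwards lookups. The forward half can be sketched in isolation; sg_cursor and sg_npages() below are hypothetical helpers, not driver code, but they use only the standard scatterlist API.

#include <linux/mm.h>
#include <linux/scatterlist.h>

struct sg_cursor {
	struct scatterlist *sg;	/* last chunk visited */
	unsigned int idx;	/* page index of the first page in sg */
};

/* Whole pages covered by one (possibly coalesced) sg chunk. */
static unsigned int sg_npages(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

/*
 * Walk forward from the cached position: for monotonically increasing
 * n, a full pass over the object visits each chunk exactly once. The
 * caller must reset the cursor (or, as in the patch, divert to the
 * radix tree) whenever n < cur->idx.
 */
static struct page *sg_cursor_page(struct sg_cursor *cur, unsigned int n)
{
	while (cur->idx + sg_npages(cur->sg) <= n) {
		cur->idx += sg_npages(cur->sg);
		cur->sg = sg_next(cur->sg);
	}
	return nth_page(sg_page(cur->sg), n - cur->idx);
}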
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 8292e79..735580d 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -31,4 +31,6 @@
#define GEM_BUG_ON(expr)
#endif
+#define I915_NUM_ENGINES 5
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index ed98959..b3bc119e 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -73,7 +73,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@@ -97,9 +97,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
- struct drm_i915_gem_object *tmp, *next;
+ struct drm_i915_gem_object *tmp;
struct list_head *list;
- int n;
+ int n, ret;
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
@@ -112,40 +112,35 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
- list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+ list_for_each_entry(tmp, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
- if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
- &tmp->base.dev->struct_mutex))
+ if (i915_gem_object_is_active(tmp))
break;
- /* While we're looping, do some clean up */
- if (tmp->madv == __I915_MADV_PURGED) {
- list_del(&tmp->batch_pool_link);
- i915_gem_object_put(tmp);
- continue;
- }
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
+ true));
if (tmp->base.size >= size) {
+ /* Clear the set of shared fences early */
+ ww_mutex_lock(&tmp->resv->lock, NULL);
+ reservation_object_add_excl_fence(tmp->resv, NULL);
+ ww_mutex_unlock(&tmp->resv->lock);
+
obj = tmp;
break;
}
}
if (obj == NULL) {
- int ret;
-
- obj = i915_gem_object_create(&pool->engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(pool->engine->i915, size);
if (IS_ERR(obj))
return obj;
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- obj->madv = I915_MADV_DONTNEED;
}
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
list_move_tail(&obj->batch_pool_link, list);
- i915_gem_object_pin_pages(obj);
return obj;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5dca32a..6dd4757 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring)
intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
}
+ kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
@@ -303,19 +304,28 @@ __create_hw_context(struct drm_device *dev,
}
/* Default context will never have a file_priv */
- if (file_priv != NULL) {
+ ret = DEFAULT_CONTEXT_HANDLE;
+ if (file_priv) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
- } else
- ret = DEFAULT_CONTEXT_HANDLE;
+ }
+ ctx->user_handle = ret;
ctx->file_priv = file_priv;
- if (file_priv)
+ if (file_priv) {
ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
+ current->comm,
+ pid_nr(ctx->pid),
+ ctx->user_handle);
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+ }
- ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -329,6 +339,9 @@ __create_hw_context(struct drm_device *dev,
return ctx;
+err_pid:
+ put_pid(ctx->pid);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
err_out:
context_close(ctx);
return ERR_PTR(ret);
@@ -352,9 +365,9 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
if (USES_FULL_PPGTT(dev)) {
- struct i915_hw_ppgtt *ppgtt =
- i915_ppgtt_create(to_i915(dev), file_priv);
+ struct i915_hw_ppgtt *ppgtt;
+ ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -751,12 +764,36 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
+struct i915_vma *
+i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
+ unsigned int flags)
+{
+ struct i915_vma *vma = ctx->engine[RCS].state;
+ int ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
+
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma = to->engine[RCS].state;
+ struct i915_vma *vma;
struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
@@ -764,17 +801,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- /* Clear this page out of any CPU caches for coherent swap-in/out. */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (ret)
- return ret;
- }
-
/* Trying to pin first makes error handling easier. */
- ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
- if (ret)
- return ret;
+ vma = i915_gem_context_pin_legacy(to, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
/*
* Pin can switch back to the default context if we end up calling into
@@ -931,22 +961,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ struct i915_gem_timeline *timeline;
enum intel_engine_id id;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
- if (engine->last_context == NULL)
- continue;
-
- if (engine->last_context == dev_priv->kernel_context)
- continue;
-
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Queue this switch after all other activity */
+ list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+ struct drm_i915_gem_request *prev;
+ struct intel_timeline *tl;
+
+ tl = &timeline->engine[engine->id];
+ prev = i915_gem_active_raw(&tl->last_request,
+ &dev_priv->drm.struct_mutex);
+ if (prev)
+ i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ &prev->submit,
+ GFP_KERNEL);
+ }
+
ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)
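The rewritten i915_gem_switch_to_kernel_context() no longer inspects engine->last_context to skip work; it always queues a switch request and orders it behind the last request on every timeline for that engine. Condensed from the hunk above (declarations and error handling trimmed), the ordering rule is:

for_each_engine(engine, dev_priv, id) {
	req = i915_gem_request_alloc(engine, dev_priv->kernel_context);

	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
		prev = i915_gem_active_raw(&timeline->engine[engine->id].last_request,
					   &dev_priv->drm.struct_mutex);
		if (prev)	/* submit only after all prior work */
			i915_sw_fence_await_sw_fence_gfp(&req->submit,
							 &prev->submit,
							 GFP_KERNEL);
	}

	i915_switch_context(req);
	i915_add_request_no_flush(req);
}

This is what lets i915_gem_suspend() assert, via assert_kernel_context_is_current(), that the kernel context is the last thing to have run on each engine.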
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 97c9d68..5e38299 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -44,51 +44,42 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
- ret = i915_mutex_lock_interruptible(obj->base.dev);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- i915_gem_object_pin_pages(obj);
-
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
- goto err_unpin;
+ goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
- src = obj->pages->sgl;
+ src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- ret =-ENOMEM;
+ ret = -ENOMEM;
goto err_free_sg;
}
- mutex_unlock(&obj->base.dev->struct_mutex);
return st;
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
-err_unpin:
+err_unpin_pages:
i915_gem_object_unpin_pages(obj);
-err_unlock:
- mutex_unlock(&obj->base.dev->struct_mutex);
err:
return ERR_PTR(ret);
}
@@ -103,36 +94,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
sg_free_table(sg);
kfree(sg);
- mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- void *addr;
- int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- mutex_unlock(&dev->struct_mutex);
-
- return addr;
+ return i915_gem_object_pin_map(obj, I915_MAP_WB);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
- mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin_map(obj);
- mutex_unlock(&dev->struct_mutex);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -179,32 +155,45 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ err = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex);
- return ret;
+
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_mutex_lock_interruptible(dev);
+ if (err)
+ goto out;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
mutex_unlock(&dev->struct_mutex);
- return ret;
+out:
+ i915_gem_object_unpin_pages(obj);
+ return err;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
@@ -222,60 +211,17 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-static void export_fences(struct drm_i915_gem_object *obj,
- struct dma_buf *dma_buf)
-{
- struct reservation_object *resv = dma_buf->resv;
- struct drm_i915_gem_request *req;
- unsigned long active;
- int idx;
-
- active = __I915_BO_ACTIVE(obj);
- if (!active)
- return;
-
- /* Serialise with execbuf to prevent concurrent fence-loops */
- mutex_lock(&obj->base.dev->struct_mutex);
-
- /* Mark the object for future fences before racily adding old fences */
- obj->base.dma_buf = dma_buf;
-
- ww_mutex_lock(&resv->lock, NULL);
-
- for_each_active(active, idx) {
- req = i915_gem_active_get(&obj->last_read[idx],
- &obj->base.dev->struct_mutex);
- if (!req)
- continue;
-
- if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
-
- i915_gem_request_put(req);
- }
-
- req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
- if (req) {
- reservation_object_add_excl_fence(resv, &req->fence);
- i915_gem_request_put(req);
- }
-
- ww_mutex_unlock(&resv->lock);
- mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
+ exp_info.resv = obj->resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -283,30 +229,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
- if (IS_ERR(dma_buf))
- return dma_buf;
-
- export_fences(obj, dma_buf);
- return dma_buf;
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
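Because the object and its dma-buf now share one reservation_object, fences attached at execbuf time are immediately visible to importers, and the export_fences() fixup removed above becomes unnecessary. A minimal sketch of the export flow under the new scheme (export_sketch is a hypothetical wrapper; error handling trimmed):

struct dma_buf *export_sketch(struct drm_device *dev,
			      struct drm_i915_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = obj->base.size;
	exp_info.flags = flags;
	exp_info.priv = &obj->base;
	exp_info.resv = obj->resv;	/* share, don't copy, the fences */

	return drm_gem_dmabuf_export(dev, &exp_info);
}

Import preserves the same identity from the other direction: obj->resv = dma_buf->resv, as set at the end of this file's changes.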
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- struct sg_table *sg;
-
- sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg))
- return PTR_ERR(sg);
-
- obj->pages = sg;
- return 0;
+ return dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
}
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- dma_buf_unmap_attachment(obj->base.import_attach,
- obj->pages, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(obj->base.import_attach, pages,
+ DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@@ -350,6 +287,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
+ obj->resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b5e9e66..bd08814 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,14 +33,17 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static bool
-gpu_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
- if (intel_engine_is_active(engine))
+ struct intel_timeline *tl;
+
+ tl = &ggtt->base.timeline.engine[engine->id];
+ if (i915_gem_active_isset(&tl->last_request))
return false;
}
@@ -56,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
- if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+ if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
return false;
list_add(&vma->exec_list, unwind);
@@ -103,6 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *vma, *next;
int ret;
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -153,7 +157,7 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- if (gpu_is_idle(dev_priv)) {
+ if (ggtt_is_idle(dev_priv)) {
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
@@ -213,6 +217,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
{
struct drm_mm_node *node, *next;
+ lockdep_assert_held(&target->vm->dev->struct_mutex);
+
list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
@@ -266,7 +272,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
- WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
+ lockdep_assert_held(&vm->dev->struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e52affd..fb5b443 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,7 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -332,7 +331,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->page = -1;
cache->vaddr = 0;
cache->i915 = i915;
- cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+ /* Must be a variable in the struct to allow GCC to unroll. */
+ cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->node.allocated = false;
}
@@ -418,15 +418,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
unsigned long offset;
void *vaddr;
- if (cache->node.allocated) {
- wmb();
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, page),
- cache->node.start, I915_CACHE_NONE, 0);
- cache->page = page;
- return unmask_page(cache->vaddr);
- }
-
if (cache->vaddr) {
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
@@ -466,6 +457,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
+ wmb();
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1109,44 +1101,20 @@ err:
return ret;
}
-static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
-{
- unsigned int mask;
-
- mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
- mask <<= I915_BO_ACTIVE_SHIFT;
-
- return mask;
-}
-
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- struct reservation_object *resv;
- if (obj->flags & other_rings) {
- ret = i915_gem_request_await_object
- (req, obj, obj->base.pending_write_domain);
- if (ret)
- return ret;
- }
-
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- ret = i915_sw_fence_await_reservation
- (&req->submit, resv, &i915_fence_ops,
- obj->base.pending_write_domain, 10*HZ,
- GFP_KERNEL | __GFP_NOWARN);
- if (ret < 0)
- return ret;
- }
+ ret = i915_gem_request_await_object
+ (req, obj, obj->base.pending_write_domain);
+ if (ret)
+ return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
@@ -1279,6 +1247,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ctx;
}
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req,
unsigned int flags)
@@ -1288,8 +1262,6 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- obj->dirty = 1; /* be paranoid */
-
/* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
* vital here: mark_active adds to the start of the callback list,
@@ -1297,37 +1269,32 @@ void i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_object_is_active(obj))
- i915_gem_object_get(obj);
- i915_gem_object_set_active(obj, idx);
- i915_gem_active_set(&obj->last_read[idx], req);
+ if (!i915_vma_is_active(vma))
+ obj->active_count++;
+ i915_vma_set_active(vma, idx);
+ i915_gem_active_set(&vma->last_read[idx], req);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
if (flags & EXEC_OBJECT_WRITE) {
- i915_gem_active_set(&obj->last_write, req);
+ i915_gem_active_set(&vma->last_write, req);
intel_fb_obj_invalidate(obj, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
}
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
-
- i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
unsigned int flags)
{
- struct reservation_object *resv;
-
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (!resv)
- return;
+ struct reservation_object *resv = obj->resv;
/* Ignore errors from failing to allocate the new fence, we can't
* handle an error right now. Worst case should be missed
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a6daf2d..cd59dbc 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ /* Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */
@@ -368,19 +371,14 @@ i915_vma_get_fence(struct i915_vma *vma)
* @dev: DRM device
*
* Restore the hw fence state to match the software tracking again, to be called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
*/
void i915_gem_restore_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
- /* Note that this may be called outside of struct_mutex, by
- * runtime suspend/resume. The barrier we require is enforced by
- * rpm itself - all access to fences/GTT are only within an rpm
- * wakeref, and to acquire that wakeref you must pass through here.
- */
-
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
@@ -391,7 +389,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(vma->obj->fault_mappable);
+ GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
@@ -646,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
@@ -656,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again).
*/
void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -666,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return;
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
@@ -680,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
+ const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
- int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
@@ -705,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 062fb0a..a5fafa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,7 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
@@ -175,7 +176,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
@@ -706,6 +707,16 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
@@ -715,9 +726,9 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned int pte_start = gen8_pte_index(start);
unsigned int num_entries = gen8_pte_count(start, length);
- uint64_t pte;
+ unsigned int pte = gen8_pte_index(start);
+ unsigned int pte_end = pte + num_entries;
gen8_pte_t *pt_vaddr;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
@@ -725,7 +736,9 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
if (WARN_ON(!px_page(pt)))
return false;
- bitmap_clear(pt->used_ptes, pte_start, num_entries);
+ GEM_BUG_ON(pte_end > GEN8_PTES);
+
+ bitmap_clear(pt->used_ptes, pte, num_entries);
if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
free_pt(vm->dev, pt);
@@ -734,8 +747,8 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
pt_vaddr = kmap_px(pt);
- for (pte = pte_start; pte < num_entries; pte++)
- pt_vaddr[pte] = scratch_pte;
+ while (pte < pte_end)
+ pt_vaddr[pte++] = scratch_pte;
kunmap_px(ppgtt, pt_vaddr);
@@ -806,6 +819,8 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
}
}
+ mark_tlbs_dirty(ppgtt);
+
if (USES_FULL_48BIT_PPGTT(vm->dev) &&
bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev))) {
free_pdp(vm->dev, pdp);
@@ -1280,16 +1295,6 @@ err_out:
return -ENOMEM;
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
@@ -2184,8 +2189,10 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
}
static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv)
+ struct drm_i915_private *dev_priv,
+ const char *name)
{
+ i915_gem_timeline_init(dev_priv, &vm->timeline, name);
drm_mm_init(&vm->mm, vm->start, vm->total);
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
@@ -2214,14 +2221,15 @@ static void gtt_write_workarounds(struct drm_device *dev)
static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+ struct drm_i915_file_private *file_priv,
+ const char *name)
{
int ret;
ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret == 0) {
kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv);
+ i915_address_space_init(&ppgtt->base, dev_priv, name);
ppgtt->base.file = file_priv;
}
@@ -2257,7 +2265,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv)
+ struct drm_i915_file_private *fpriv,
+ const char *name)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -2266,7 +2275,7 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
+ ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
@@ -2289,6 +2298,7 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
+ i915_gem_timeline_fini(&ppgtt->base.timeline);
list_del(&ppgtt->base.global_link);
drm_mm_takedown(&ppgtt->base.mm);
@@ -2370,14 +2380,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
- return 0;
+ return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2395,16 +2406,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
gen8_set_pte(pte, gen8_pte_encode(addr, level));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2418,11 +2424,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gen8_pte_t __iomem *gtt_entries;
gen8_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
@@ -2446,8 +2449,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
struct insert_entries {
@@ -2486,16 +2487,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
(offset >> PAGE_SHIFT);
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
iowrite32(vm->pte_encode(addr, level, flags), pte);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
/*
@@ -2515,11 +2511,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t __iomem *gtt_entries;
gen6_pte_t gtt_entry;
dma_addr_t addr;
- int rpm_atomic_seq;
int i = 0;
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
@@ -2542,8 +2535,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void nop_clear_range(struct i915_address_space *vm,
@@ -2554,7 +2545,6 @@ static void nop_clear_range(struct i915_address_space *vm,
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start, uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2562,9 +2552,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2576,15 +2563,12 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -2592,9 +2576,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
@@ -2607,8 +2588,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
@@ -2617,16 +2596,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2634,39 +2607,25 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
-
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- int rpm_atomic_seq;
-
- rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
- intel_gtt_clear_range(first_entry, num_entries);
-
- assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@@ -2679,8 +2638,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
/*
* Without aliasing PPGTT there's no difference between
@@ -2696,6 +2657,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
u32 pte_flags;
int ret;
@@ -2710,14 +2672,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm,
vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
}
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt =
- to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->pages, vma->node.start,
cache_level, pte_flags);
@@ -2728,19 +2691,24 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
- if (vma->flags & I915_VMA_GLOBAL_BIND)
+ if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm,
vma->node.start, size);
+ intel_runtime_pm_put(i915);
+ }
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
vma->node.start, size);
}
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2754,8 +2722,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
}
- dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -3274,11 +3241,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm.
*/
+ mutex_lock(&dev_priv->drm.struct_mutex);
ggtt->base.total -= PAGE_SIZE;
- i915_address_space_init(&ggtt->base, dev_priv);
+ i915_address_space_init(&ggtt->base, dev_priv, "[global]");
ggtt->base.total += PAGE_SIZE;
if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
dev_priv->ggtt.mappable_base,
@@ -3327,7 +3296,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_list) {
+ &dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3386,6 +3355,7 @@ i915_vma_retire(struct i915_gem_active *active,
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
container_of(active, struct i915_vma, last_read[idx]);
+ struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
@@ -3396,6 +3366,34 @@ i915_vma_retire(struct i915_gem_active *active,
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ if (--obj->active_count)
+ return;
+
+ /* Bump our place on the bound list to keep it roughly in LRU order
+ * so that we don't steal from recently used but inactive objects
+ * (unless we are forced to, of course!)
+ */
+ if (obj->bind_count)
+ list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+
+ obj->mm.dirty = true; /* be paranoid */
+
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static void
+i915_ggtt_retire__write(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_write);
+
+ intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -3417,17 +3415,40 @@ void i915_vma_close(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
- list_del_init(&vma->obj_link);
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
WARN_ON(i915_vma_unbind(vma));
}
+static inline long vma_compare(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+
+ if (vma->vm != vm)
+ return vma->vm - vm;
+
+ if (!view)
+ return vma->ggtt_view.type;
+
+ if (vma->ggtt_view.type != view->type)
+ return vma->ggtt_view.type - view->type;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params));
+}
+
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
+ struct rb_node *rb, **p;
int i;
GEM_BUG_ON(vm->closed);
@@ -3439,6 +3460,8 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_write,
+ i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
init_request_active(&vma->last_fence, NULL);
list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
@@ -3459,33 +3482,28 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
if (i915_is_ggtt(vm)) {
vma->flags |= I915_VMA_GGTT;
+ list_add(&vma->obj_link, &obj->vma_list);
} else {
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
}
- list_add_tail(&vma->obj_link, &obj->vma_list);
- return vma;
-}
-
-static inline bool vma_matches(struct i915_vma *vma,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- if (vma->vm != vm)
- return false;
+ rb = NULL;
+ p = &obj->vma_tree.rb_node;
+ while (*p) {
+ struct i915_vma *pos;
- if (!i915_vma_is_ggtt(vma))
- return true;
-
- if (!view)
- return vma->ggtt_view.type == 0;
-
- if (vma->ggtt_view.type != view->type)
- return false;
+ rb = *p;
+ pos = rb_entry(rb, struct i915_vma, obj_node);
+ if (vma_compare(pos, vm, view) < 0)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&vma->obj_node, rb, p);
+ rb_insert_color(&vma->obj_node, &obj->vma_tree);
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params)) == 0;
+ return vma;
}
struct i915_vma *
@@ -3493,6 +3511,7 @@ i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
@@ -3504,12 +3523,23 @@ i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
- struct i915_vma *vma;
+ struct rb_node *rb;
+
+ rb = obj->vma_tree.rb_node;
+ while (rb) {
+ struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+ long cmp;
- list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
- if (vma_matches(vma, vm, view))
+ cmp = vma_compare(vma, vm, view);
+ if (cmp == 0)
return vma;
+ if (cmp < 0)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+
return NULL;
}
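
For readers unfamiliar with the pattern, the lookup above is the standard Linux rbtree descent keyed by vma_compare(). A minimal self-contained sketch of the same search/insert pair, using a hypothetical long key in place of the (vm, ggtt_view) tuple (illustrative only, not the i915 structures):

#include <linux/rbtree.h>

struct node {
	struct rb_node rb;
	long key;	/* stands in for what vma_compare() orders by */
};

static struct node *tree_find(struct rb_root *root, long key)
{
	struct rb_node *rb = root->rb_node;

	while (rb) {
		struct node *pos = rb_entry(rb, struct node, rb);

		if (pos->key == key)
			return pos;
		rb = key > pos->key ? rb->rb_right : rb->rb_left;
	}
	return NULL;
}

static void tree_insert(struct rb_root *root, struct node *node)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct node *pos = rb_entry(*p, struct node, rb);

		parent = *p;
		p = node->key > pos->key ? &(*p)->rb_right : &(*p)->rb_left;
	}
	rb_link_node(&node->rb, parent, p);
	rb_insert_color(&node->rb, root);
}

The one invariant to preserve is that search and insertion descend with the same ordering; the patch guarantees that by using vma_compare() as the single comparator for both __i915_vma_create() and i915_gem_obj_to_vma().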
@@ -3520,11 +3550,14 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
vma = i915_gem_obj_to_vma(obj, vm, view);
- if (!vma)
+ if (!vma) {
vma = __i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
+ }
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
@@ -3590,7 +3623,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages);
@@ -3626,35 +3659,47 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
- struct scatterlist *sg;
- struct sg_page_iter obj_sg_iter;
+ struct scatterlist *sg, *iter;
+ unsigned int count = view->params.partial.size;
+ unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+ ret = sg_alloc_table(st, count, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
+ iter = i915_gem_object_get_sg(obj,
+ view->params.partial.offset,
+ &offset);
+ GEM_BUG_ON(!iter);
+
sg = st->sgl;
st->nents = 0;
- for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
- view->params.partial.offset)
- {
- if (st->nents >= view->params.partial.size)
- break;
+ do {
+ unsigned int len;
- sg_set_page(sg, NULL, PAGE_SIZE, 0);
- sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
- sg_dma_len(sg) = PAGE_SIZE;
+ len = min(iter->length - (offset << PAGE_SHIFT),
+ count << PAGE_SHIFT);
+ sg_set_page(sg, NULL, len, 0);
+ sg_dma_address(sg) =
+ sg_dma_address(iter) + (offset << PAGE_SHIFT);
+ sg_dma_len(sg) = len;
- sg = sg_next(sg);
st->nents++;
- }
+ count -= len >> PAGE_SHIFT;
+ if (count == 0) {
+ sg_mark_end(sg);
+ return st;
+ }
- return st;
+ sg = __sg_next(sg);
+ iter = __sg_next(iter);
+ offset = 0;
+ } while (1);
err_sg_alloc:
kfree(st);
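
To make the chunking arithmetic in intel_partial_pages() concrete: offset is the page offset into the current source entry and count the number of pages still owed to the view, so each pass takes the smaller of what remains in the entry and what remains of the view. A hedged restatement of that single step (names are illustrative):

/* Bytes contributed by one source sg entry; illustrative only. */
static unsigned int partial_chunk_len(unsigned int entry_len, /* bytes */
				      unsigned int offset,    /* pages */
				      unsigned int count)     /* pages */
{
	return min(entry_len - (offset << PAGE_SHIFT),
		   count << PAGE_SHIFT);
}

With 4 KiB pages, an 8-page source entry read from page offset 3 with 10 pages still wanted contributes 5 pages (20480 bytes), leaving 5 pages for the next entry.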
@@ -3667,11 +3712,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
+ /* The vma->pages are only valid within the lifespan of the borrowed
+ * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the
+ * vma->pages must be rebuilt as well. A simple rule is that vma->pages must only
+ * be accessed when the obj->mm.pages are pinned.
+ */
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
if (vma->pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->pages = vma->obj->pages;
+ vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
@@ -3778,11 +3830,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma);
if (!vma)
return;
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c241d81..c23ef9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -211,6 +211,7 @@ struct i915_vma {
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_write;
struct i915_gem_active last_fence;
/**
@@ -226,6 +227,7 @@ struct i915_vma {
struct list_head vm_link;
struct list_head obj_link; /* Link in the object's VMA list */
+ struct rb_node obj_node;
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@@ -341,6 +343,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
+ struct i915_gem_timeline timeline;
struct drm_device *dev;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
@@ -612,7 +615,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
+ struct drm_i915_file_private *fpriv,
+ const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -628,8 +632,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
new file mode 100644
index 0000000..4b3ff3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+
+/* convert swiotlb segment size into sensible units (pages)! */
+#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
+
+static void internal_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int max_order;
+ gfp_t gfp;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+
+ max_order = MAX_ORDER;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
+ max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+#endif
+
+ gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ gfp &= ~__GFP_HIGHMEM;
+ gfp |= __GFP_DMA32;
+ }
+
+ do {
+ int order = min(fls(npages) - 1, max_order);
+ struct page *page;
+
+ do {
+ page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ if (page)
+ break;
+ if (!order--)
+ goto err;
+
+ /* Limit subsequent allocations as well */
+ max_order = order;
+ } while (1);
+
+ sg_set_page(sg, page, PAGE_SIZE << order, 0);
+ st->nents++;
+
+ npages -= 1 << order;
+ if (!npages) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while (1);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ /* Mark the pages as dontneed whilst they are still pinned. As soon
+ * as they are unpinned they are allowed to be reaped by the shrinker,
+ * and the caller is expected to repopulate - the contents of this
+ * object are only valid whilst active and pinned.
+ */
+ obj->mm.madv = I915_MADV_DONTNEED;
+ return st;
+
+err:
+ sg_mark_end(sg);
+ internal_free_pages(st);
+ return ERR_PTR(-ENOMEM);
+}
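
The allocation loop above is a common pattern for compound pages: aim for the largest order that still fits (fls(npages) - 1, clamped to max_order) and, on failure, lower the ceiling permanently so later iterations never retry orders that have already failed. A minimal sketch of just that control flow, assuming the same alloc_pages() semantics:

/* Sketch: allocate one run of pages, preferring large orders. */
static struct page *alloc_run(unsigned int npages, int *max_order, gfp_t gfp)
{
	int order = min(fls(npages) - 1, *max_order);

	for (;;) {
		struct page *page;

		/* High orders are speculative: fail fast and quietly. */
		page = alloc_pages(gfp | (order ? __GFP_NORETRY | __GFP_NOWARN : 0),
				   order);
		if (page)
			return page;
		if (!order--)
			return NULL;	/* even order-0 failed */
		*max_order = order;	/* stop retrying doomed orders */
	}
}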
+
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ internal_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = i915_gem_object_get_pages_internal,
+ .put_pages = i915_gem_object_put_pages_internal,
+};
+
+/**
+ * i915_gem_object_create_internal: create an object with volatile pages
+ *
+ * Creates a new object that wraps some internal memory for private use.
+ * This object is not backed by swappable storage, and as such its contents
+ * are volatile and only valid whilst pinned. If the object is reaped by the
+ * shrinker, its pages and data will be discarded. Equally, it is not a full
+ * GEM object and so not valid for access from userspace. This makes it useful
+ * for hardware interfaces like ringbuffers (which are pinned from the time
+ * the request is written to the time the hardware stops accessing it), but
+ * not for contexts (which need to be preserved when not active for later
+ * reuse). Note that it is not cleared upon allocation.
+ */
+struct drm_i915_gem_object *
+i915_gem_object_create_internal(struct drm_i915_private *i915,
+ unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(&i915->drm);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+
+ return obj;
+}
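
A hypothetical caller, condensed from the render-state code later in this patch: create the object, bind and pin it, and treat the contents as valid only while pinned:

struct drm_i915_gem_object *obj;
struct i915_vma *vma;

obj = i915_gem_object_create_internal(i915, 4096);
if (IS_ERR(obj))
	return PTR_ERR(obj);

vma = i915_vma_create(obj, &i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
	i915_gem_object_put(obj);
	return PTR_ERR(vma);
}
/* contents are valid only while the backing pages stay pinned */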
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a98c0f4..5af19b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,17 +28,19 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct render_state {
+struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma;
- u32 aux_batch_size;
- u32 aux_batch_offset;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
};
static const struct intel_renderstate_rodata *
-render_state_get_rodata(const struct drm_i915_gem_request *req)
+render_state_get_rodata(const struct intel_engine_cs *engine)
{
- switch (INTEL_GEN(req->i915)) {
+ switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
case 7:
@@ -63,29 +65,26 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
*/
#define OUT_BATCH(batch, i, val) \
do { \
- if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
- ret = -ENOSPC; \
- goto err_out; \
- } \
+ if ((i) >= PAGE_SIZE / sizeof(u32)) \
+ goto err; \
(batch)[(i)++] = (val); \
} while(0)
-static int render_state_setup(struct render_state *so)
+static int render_state_setup(struct intel_render_state *so,
+ struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(so->vma->vm->dev);
const struct intel_renderstate_rodata *rodata = so->rodata;
- const bool has_64bit_reloc = INTEL_GEN(dev_priv) >= 8;
+ struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
- struct page *page;
+ unsigned int needs_clflush;
u32 *d;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret)
return ret;
- page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
- d = kmap(page);
+ d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -93,12 +92,10 @@ static int render_state_setup(struct render_state *so)
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
s = lower_32_bits(r);
- if (has_64bit_reloc) {
+ if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
- rodata->batch[i + 1] != 0) {
- ret = -EINVAL;
- goto err_out;
- }
+ rodata->batch[i + 1] != 0)
+ goto err;
d[i++] = s;
s = upper_32_bits(r);
@@ -110,12 +107,20 @@ static int render_state_setup(struct render_state *so)
d[i++] = s;
}
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
+ goto err;
+ }
+
+ so->batch_offset = so->vma->node.start;
+ so->batch_size = rodata->batch_items * sizeof(u32);
+
while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP);
- so->aux_batch_offset = i * sizeof(u32);
+ so->aux_offset = i * sizeof(u32);
- if (HAS_POOLED_EU(dev_priv)) {
+ if (HAS_POOLED_EU(i915)) {
/*
* We always program 3x6 pool config but depending upon which
* subslice is disabled HW drops down to appropriate config
@@ -143,88 +148,133 @@ static int render_state_setup(struct render_state *so)
}
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
- so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
-
+ so->aux_size = i * sizeof(u32) - so->aux_offset;
+ so->aux_offset += so->batch_offset;
/*
* Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8.
*/
- so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
-
- kunmap(page);
-
- ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
- if (ret)
- return ret;
-
- if (rodata->reloc[reloc_index] != -1) {
- DRM_ERROR("only %d relocs resolved\n", reloc_index);
- return -EINVAL;
- }
+ so->aux_size = ALIGN(so->aux_size, 8);
- return 0;
+ if (needs_clflush)
+ drm_clflush_virt_range(d, i * sizeof(u32));
+ kunmap_atomic(d);
-err_out:
- kunmap(page);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+out:
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
+
+err:
+ kunmap_atomic(d);
+ ret = -EINVAL;
+ goto out;
}
#undef OUT_BATCH
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
- struct render_state so;
+ struct intel_render_state *so;
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
int ret;
- if (WARN_ON(req->engine->id != RCS))
- return -ENOENT;
+ if (engine->id != RCS)
+ return 0;
- so.rodata = render_state_get_rodata(req);
- if (!so.rodata)
+ rodata = render_state_get_rodata(engine);
+ if (!rodata)
return 0;
- if (so.rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > 4096)
return -EINVAL;
- obj = i915_gem_object_create(&req->i915->drm, 4096);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so)
+ return -ENOMEM;
- so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
- if (IS_ERR(so.vma)) {
- ret = PTR_ERR(so.vma);
- goto err_obj;
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err_free;
}
- ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
- if (ret)
+ so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(so->vma)) {
+ ret = PTR_ERR(so->vma);
goto err_obj;
+ }
+
+ so->rodata = rodata;
+ engine->render_state = so;
+ return 0;
+
+err_obj:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(so);
+ return ret;
+}
+
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+{
+ struct intel_render_state *so;
+ int ret;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
- ret = render_state_setup(&so);
+ so = req->engine->render_state;
+ if (!so)
+ return 0;
+
+ /* Recreate the page after shrinking */
+ if (!so->vma->obj->mm.pages)
+ so->batch_offset = -1;
+
+ ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
- goto err_unpin;
+ return ret;
+
+ if (so->vma->node.start != so->batch_offset) {
+ ret = render_state_setup(so, req->i915);
+ if (ret)
+ goto err_unpin;
+ }
- ret = req->engine->emit_bb_start(req, so.vma->node.start,
- so.rodata->batch_items * 4,
+ ret = req->engine->emit_bb_start(req,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
- if (so.aux_batch_size > 8) {
+ if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req,
- (so.vma->node.start +
- so.aux_batch_offset),
- so.aux_batch_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, req, 0);
+ i915_vma_move_to_active(so->vma, req, 0);
err_unpin:
- i915_vma_unpin(so.vma);
-err_obj:
- i915_gem_object_put(obj);
+ i915_vma_unpin(so->vma);
return ret;
}
+
+void i915_gem_render_state_fini(struct intel_engine_cs *engine)
+{
+ struct intel_render_state *so;
+ struct drm_i915_gem_object *obj;
+
+ so = fetch_and_zero(&engine->render_state);
+ if (!so)
+ return;
+
+ obj = so->vma->obj;
+
+ i915_vma_close(so->vma);
+ __i915_gem_object_release_unless_active(obj);
+
+ kfree(so);
+}
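
Taken together, the three entry points give the render state an engine lifetime instead of a request lifetime. The intended ordering, sketched with hypothetical call sites (the actual hook points are in the engine setup/teardown paths elsewhere in this series):

/* once, while initialising the engine */
err = i915_gem_render_state_init(engine);

/* per request that needs it; cheap when already set up and resident */
err = i915_gem_render_state_emit(request);

/* once, while tearing the engine down */
i915_gem_render_state_fini(engine);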
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 18cce3f..8748184 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,6 +26,8 @@
struct drm_i915_gem_request;
-int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct intel_engine_cs *engine);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index f9af2a0..0b3b051 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -23,6 +23,7 @@
*/
#include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
#include "i915_drv.h"
@@ -33,13 +34,7 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* Timelines are bound by eviction to a VM. However, since
- * we only have a global seqno at the moment, we only have
- * a single timeline. Note that each timeline will have
- * multiple execution contexts (fence contexts) as we allow
- * engines within a single timeline to execute in parallel.
- */
- return "global";
+ return to_request(fence)->timeline->common->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -58,43 +53,9 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
- signed long timeout_jiffies)
+ signed long timeout)
{
- s64 timeout_ns, *timeout;
- int ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
- timeout_ns = jiffies_to_nsecs(timeout_jiffies);
- timeout = &timeout_ns;
- } else {
- timeout = NULL;
- }
-
- ret = i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
- if (ret == -ETIME)
- return 0;
-
- if (ret < 0)
- return ret;
-
- if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
- timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
- return timeout_jiffies;
-}
-
-static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
- snprintf(str, size, "%u", fence->seqno);
-}
-
-static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%u",
- intel_engine_get_seqno(to_request(fence)->engine));
+ return i915_wait_request(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@@ -111,8 +72,6 @@ const struct dma_fence_ops i915_fence_ops = {
.signaled = i915_fence_signaled,
.wait = i915_fence_wait,
.release = i915_fence_release,
- .fence_value_str = i915_fence_value_str,
- .timeline_value_str = i915_fence_timeline_value_str,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -164,8 +123,14 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
struct i915_gem_active *active, *next;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_gem_request_completed(request));
+
trace_i915_gem_request_retire(request);
- list_del(&request->link);
+
+ spin_lock_irq(&request->engine->timeline->lock);
+ list_del_init(&request->link);
+ spin_unlock_irq(&request->engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -177,6 +142,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
*/
list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
+ request->i915->gt.active_requests--;
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -214,6 +180,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
}
i915_gem_context_put(request->ctx);
+
+ dma_fence_signal(&request->fence);
i915_gem_request_put(request);
}
@@ -223,10 +191,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(list_empty(&req->link));
+ if (list_empty(&req->link))
+ return;
do {
- tmp = list_first_entry(&engine->request_list,
+ tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
i915_gem_request_retire(tmp);
@@ -253,40 +222,51 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
return 0;
}
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
+ struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv, id) {
- ret = intel_engine_idle(engine,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- return ret;
- }
- i915_gem_retire_requests(dev_priv);
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
- while (intel_kick_waiters(dev_priv) ||
- intel_kick_signalers(dev_priv))
+ if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+ while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
yield();
+ yield();
}
+ atomic_set(&timeline->next_seqno, seqno);
/* Finally reset hw state */
- for_each_engine(engine, dev_priv, id)
- intel_engine_init_seqno(engine, seqno);
+ for_each_engine(engine, i915, id)
+ intel_engine_init_global_seqno(engine, seqno);
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl = &timeline->engine[id];
+
+ memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+ }
+ }
return 0;
}
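
Every seqno check here funnels through i915_seqno_passed(), which stays correct across u32 wraparound by comparing in signed arithmetic; for reference, the helper (as declared in i915_gem_request.h) is:

static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}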
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
@@ -294,48 +274,84 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev_priv, seqno - 1);
- if (ret)
- return ret;
-
- dev_priv->next_seqno = seqno;
- return 0;
+ return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static int reserve_global_seqno(struct drm_i915_private *i915)
{
- /* reserve 0 for non-seqno */
- if (unlikely(dev_priv->next_seqno == 0)) {
- int ret;
+ u32 active_requests = ++i915->gt.active_requests;
+ u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+ int ret;
- ret = i915_gem_init_seqno(dev_priv, 0);
- if (ret)
- return ret;
+ /* Reservation is fine until we need to wrap around */
+ if (likely(next_seqno + active_requests > next_seqno))
+ return 0;
- dev_priv->next_seqno = 1;
+ ret = i915_gem_init_global_seqno(i915, 0);
+ if (ret) {
+ i915->gt.active_requests--;
+ return ret;
}
- *seqno = dev_priv->next_seqno++;
return 0;
}
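
The next_seqno + active_requests > next_seqno test is the usual unsigned-overflow idiom: u32 addition wraps, so the sum compares lower exactly when the remaining seqno space is too small (active_requests is at least one here, having just been incremented). Distilled:

/* True while `wanted` more seqnos (wanted >= 1) fit before u32 wrap. */
static inline bool seqno_space_left(u32 next, u32 wanted)
{
	return next + wanted > next;
}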
+static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
+{
+ /* next_seqno only incremented under a mutex */
+ return ++tl->next_seqno.counter;
+}
+
+static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
+{
+ return atomic_inc_return(&tl->next_seqno);
+}
+
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct drm_i915_gem_request *request =
container_of(fence, typeof(*request), submit);
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_timeline *timeline;
+ unsigned long flags;
+ u32 seqno;
+
+ if (state != FENCE_COMPLETE)
+ return NOTIFY_DONE;
+
+ /* Transfer from per-context onto the global per-engine timeline */
+ timeline = engine->timeline;
+ GEM_BUG_ON(timeline == request->timeline);
/* Will be called from irq-context when using foreign DMA fences */
+ spin_lock_irqsave(&timeline->lock, flags);
- switch (state) {
- case FENCE_COMPLETE:
- request->engine->last_submitted_seqno = request->fence.seqno;
- request->engine->submit_request(request);
- break;
+ seqno = timeline_get_seqno(timeline->common);
+ GEM_BUG_ON(!seqno);
+ GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
- case FENCE_FREE:
- break;
- }
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
+ request->previous_seqno = timeline->last_submitted_seqno;
+ timeline->last_submitted_seqno = seqno;
+
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ request->global_seqno = seqno;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_enable_signaling(request);
+ spin_unlock(&request->lock);
+
+ GEM_BUG_ON(!request->global_seqno);
+ engine->emit_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
+ engine->submit_request(request);
+
+ spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+ list_move_tail(&request->link, &timeline->requests);
+ spin_unlock(&request->timeline->lock);
+
+ spin_unlock_irqrestore(&timeline->lock, flags);
return NOTIFY_DONE;
}
@@ -358,9 +374,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
- u32 seqno;
int ret;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
@@ -369,10 +386,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
+ ret = reserve_global_seqno(dev_priv);
+ if (ret)
+ return ERR_PTR(ret);
+
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->request_list,
+ req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
- if (req && i915_gem_request_completed(req))
+ if (req && __i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@@ -383,7 +404,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* of being read by __i915_gem_active_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
* the RCU lookup, we change chase the request->engine pointer,
- * read the request->fence.seqno and increment the reference count.
+ * read the request->global_seqno and increment the reference count.
*
* The reference count is incremented atomically. If it is zero,
* the lookup knows the request is unallocated and complete. Otherwise,
@@ -404,19 +425,20 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* Do not use kmem_cache_zalloc() here!
*/
req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
- ret = i915_gem_get_seqno(dev_priv, &seqno);
- if (ret)
- goto err;
+ req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(req->timeline == engine->timeline);
spin_lock_init(&req->lock);
dma_fence_init(&req->fence,
&i915_fence_ops,
&req->lock,
- engine->fence_context,
- seqno);
+ req->timeline->fence_context,
+ __timeline_get_seqno(req->timeline->common));
i915_sw_fence_init(&req->submit, submit_notify);
@@ -426,6 +448,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = i915_gem_context_get(ctx);
/* No zalloc, must clear what we need by hand */
+ req->global_seqno = 0;
req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -438,6 +461,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* away, e.g. because a GPU scheduler has deferred it.
*/
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
@@ -457,8 +481,9 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
err_ctx:
i915_gem_context_put(ctx);
-err:
kmem_cache_free(dev_priv->requests, req);
+err_unreserve:
+ dev_priv->gt.active_requests--;
return ERR_PTR(ret);
}
@@ -466,15 +491,28 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
- int idx, ret;
+ int ret;
GEM_BUG_ON(to == from);
- if (to->engine == from->engine)
+ if (to->timeline == from->timeline)
return 0;
- idx = intel_engine_sync_index(from->engine, to->engine);
- if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+ if (to->engine == from->engine) {
+ ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (!from->global_seqno) {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@@ -492,7 +530,54 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
- from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+ to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+ return 0;
+}
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *array;
+ int ret;
+ int i;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return 0;
+
+ if (dma_fence_is_i915(fence))
+ return i915_gem_request_await_request(req, to_request(fence));
+
+ if (!dma_fence_is_array(fence)) {
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ fence, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
+ }
+
+ /* Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
+
+ array = to_dma_fence_array(fence);
+ for (i = 0; i < array->num_fences; i++) {
+ struct dma_fence *child = array->fences[i];
+
+ if (dma_fence_is_i915(child))
+ ret = i915_gem_request_await_request(req,
+ to_request(child));
+ else
+ ret = i915_sw_fence_await_dma_fence(&req->submit,
+ child, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
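
Stripped of the i915 specifics, the routine is "dispatch on fence type, expanding a dma_fence_array one level". A sketch assuming only the dma-fence-array API, with the per-fence action supplied by the caller:

#include <linux/dma-fence-array.h>

static int await_fence_tree(struct dma_fence *fence,
			    int (*await)(struct dma_fence *f, void *data),
			    void *data)
{
	struct dma_fence_array *array;
	unsigned int i;
	int ret;

	if (!dma_fence_is_array(fence))
		return await(fence, data);

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		ret = await(array->fences[i], data);
		if (ret < 0)
			return ret;
	}
	return 0;
}

Note that, like the patch, this deliberately does not recurse into nested arrays, relying on the signal-on-any caveat described in the comment above.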
@@ -521,40 +606,47 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write)
{
- struct i915_gem_active *active;
- unsigned long active_mask;
- int idx;
+ struct dma_fence *excl;
+ int ret = 0;
if (write) {
- active_mask = i915_gem_object_get_active(obj);
- active = obj->last_read;
+ struct dma_fence **shared;
+ unsigned int count, i;
+
+ ret = reservation_object_get_fences_rcu(obj->resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ if (ret)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
} else {
- active_mask = 1;
- active = &obj->last_write;
+ excl = reservation_object_get_excl_rcu(obj->resv);
}
- for_each_active(active_mask, idx) {
- struct drm_i915_gem_request *request;
- int ret;
-
- request = i915_gem_active_peek(&active[idx],
- &obj->base.dev->struct_mutex);
- if (!request)
- continue;
+ if (excl) {
+ if (ret == 0)
+ ret = i915_gem_request_await_dma_fence(to, excl);
- ret = i915_gem_request_await_request(to, request);
- if (ret)
- return ret;
+ dma_fence_put(excl);
}
- return 0;
+ return ret;
}
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- dev_priv->gt.active_engines |= intel_engine_flag(engine);
if (dev_priv->gt.awake)
return;
@@ -580,11 +672,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
+ struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
- u32 request_start;
- u32 reserved_tail;
- int ret;
+ int err;
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
/*
@@ -592,8 +684,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- request_start = ring->tail;
- reserved_tail = request->reserved_space;
request->reserved_space = 0;
/*
@@ -604,10 +694,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* what.
*/
if (flush_caches) {
- ret = engine->emit_flush(request, EMIT_FLUSH);
+ err = engine->emit_flush(request, EMIT_FLUSH);
/* Not allowed to fail! */
- WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+ WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
/* Record the position of the start of the breadcrumb so that
@@ -615,20 +705,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
+ err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ GEM_BUG_ON(err);
request->postfix = ring->tail;
-
- /* Not allowed to fail! */
- ret = engine->emit_request(request);
- WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
-
- /* Sanity check that the reserved size was large enough. */
- ret = ring->tail - request_start;
- if (ret < 0)
- ret += ring->size;
- WARN_ONCE(ret > reserved_tail,
- "Not enough space reserved (%d bytes) "
- "for adding the request (%d bytes)\n",
- reserved_tail, ret);
+ ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@@ -636,18 +716,24 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* see a more recent value in the hws than we are tracking.
*/
- prev = i915_gem_active_raw(&engine->last_request,
+ prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
if (prev)
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
- request->emitted_jiffies = jiffies;
- request->previous_seqno = engine->last_pending_seqno;
- engine->last_pending_seqno = request->fence.seqno;
- i915_gem_active_set(&engine->last_request, request);
- list_add_tail(&request->link, &engine->request_list);
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&request->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+ request->fence.seqno));
+
+ timeline->last_submitted_seqno = request->fence.seqno;
+ i915_gem_active_set(&timeline->last_request, request);
+
list_add_tail(&request->ring_link, &ring->request_list);
+ request->emitted_jiffies = jiffies;
i915_gem_mark_busy(engine);
@@ -715,7 +801,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
- if (i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req))
return true;
if (signal_pending_state(state, current))
@@ -730,76 +816,101 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
+static long
+__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
+{
+ const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+ DEFINE_WAIT(reset);
+ DEFINE_WAIT(wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ add_wait_queue(q, &reset);
+
+ do {
+ prepare_to_wait(&request->submit.wait, &wait, state);
+
+ if (i915_sw_fence_done(&request->submit))
+ break;
+
+ if (flags & I915_WAIT_LOCKED &&
+ i915_reset_in_progress(&request->i915->gpu_error)) {
+ __set_current_state(TASK_RUNNING);
+ i915_reset(request->i915);
+ reset_wait_queue(q, &reset);
+ continue;
+ }
+
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ } while (timeout);
+ finish_wait(&request->submit.wait, &wait);
+
+ if (flags & I915_WAIT_LOCKED)
+ remove_wait_queue(q, &reset);
+
+ return timeout;
+}
+
/**
* i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
* @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
+ *
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
*
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * If the caller holds the struct_mutex, it must pass I915_WAIT_LOCKED in
+ * via the flags; conversely, if the struct_mutex is not held, the caller
+ * must not specify that the wait is locked.
*
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero or -ETIME if the request is unfinished after the timeout expires.
+ * May return -ERESTARTSYS if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
*/
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
struct intel_wait wait;
- unsigned long timeout_remain;
- int ret = 0;
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ GEM_BUG_ON(debug_locks &&
+ !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
+ GEM_BUG_ON(timeout < 0);
if (i915_gem_request_completed(req))
- return 0;
+ return timeout;
- timeout_remain = MAX_SCHEDULE_TIMEOUT;
- if (timeout) {
- if (WARN_ON(*timeout < 0))
- return -EINVAL;
-
- if (*timeout == 0)
- return -ETIME;
-
- /* Record current time in case interrupted, or wedged */
- timeout_remain = nsecs_to_jiffies_timeout(*timeout);
- *timeout += ktime_get_raw_ns();
- }
+ if (!timeout)
+ return -ETIME;
trace_i915_gem_request_wait_begin(req);
- /* This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
- gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+ if (!i915_sw_fence_done(&req->submit)) {
+ timeout = __i915_request_wait_for_submit(req, flags, timeout);
+ if (timeout < 0)
+ goto complete;
+
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ }
+ GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
@@ -809,7 +920,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- intel_wait_init(&wait, req->fence.seqno);
+ intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@@ -819,16 +930,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
if (signal_pending_state(state, current)) {
- ret = -ERESTARTSYS;
+ timeout = -ERESTARTSYS;
break;
}
- timeout_remain = io_schedule_timeout(timeout_remain);
- if (timeout_remain == 0) {
- ret = -ETIME;
+ if (!timeout) {
+ timeout = -ETIME;
break;
}
+ timeout = io_schedule_timeout(timeout);
+
if (intel_wait_complete(&wait))
break;
@@ -875,74 +987,39 @@ wakeup:
complete:
trace_i915_gem_request_wait_end(req);
- if (timeout) {
- *timeout -= ktime_get_raw_ns();
- if (*timeout < 0)
- *timeout = 0;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regrssion from the timespec->ktime conversion.
- */
- if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
- *timeout = 0;
- }
-
- if (IS_RPS_USER(rps) &&
- req->fence.seqno == req->engine->last_submitted_seqno) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&req->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&req->i915->rps.client_lock);
- }
-
- return ret;
+ return timeout;
}
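
Under the new contract the caller keeps all the jiffies bookkeeping; a hypothetical caller bounding the wait to 100ms looks like:

long remaining;

remaining = i915_wait_request(rq, I915_WAIT_INTERRUPTIBLE,
			      msecs_to_jiffies(100));
if (remaining < 0)
	return remaining; /* -ETIME on timeout, -ERESTARTSYS on signal */

/* request completed with `remaining` jiffies of the budget unused */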
-static bool engine_retire_requests(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
- list_for_each_entry_safe(request, next, &engine->request_list, link) {
- if (!i915_gem_request_completed(request))
- return false;
+ list_for_each_entry_safe(request, next,
+ &engine->timeline->requests, link) {
+ if (!__i915_gem_request_completed(request))
+ return;
i915_gem_request_retire(request);
}
-
- return true;
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (dev_priv->gt.active_engines == 0)
+ if (!dev_priv->gt.active_requests)
return;
GEM_BUG_ON(!dev_priv->gt.awake);
- for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
- if (engine_retire_requests(engine))
- dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+ for_each_engine(engine, dev_priv, id)
+ engine_retire_requests(engine);
- if (dev_priv->gt.active_engines == 0)
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
+ if (!dev_priv->gt.active_requests)
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
+ msecs_to_jiffies(100));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index bceeaa3..75f8360 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -81,11 +81,14 @@ struct drm_i915_gem_request {
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
struct intel_signal_node signaling;
struct i915_sw_fence submit;
wait_queue_t submitq;
+ u32 global_seqno;
+
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
* this request.
@@ -147,7 +150,7 @@ struct drm_i915_gem_request {
extern const struct dma_fence_ops i915_fence_ops;
-static inline bool fence_is_i915(struct dma_fence *fence)
+static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
return fence->ops == &i915_fence_ops;
}
@@ -162,7 +165,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
- return req ? req->fence.seqno : 0;
+ return req ? req->global_seqno : 0;
}
static inline struct intel_engine_cs *
@@ -176,7 +179,7 @@ to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
- GEM_BUG_ON(fence && !fence_is_i915(fence));
+ GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
return container_of(fence, struct drm_i915_gem_request, fence);
}
@@ -214,6 +217,8 @@ int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
bool write);
+int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+ struct dma_fence *fence);
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
@@ -226,13 +231,13 @@ struct intel_rps_client;
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
-int i915_wait_request(struct drm_i915_gem_request *req,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -245,17 +250,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
+ GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->fence.seqno);
+ req->global_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ if (!req->global_seqno)
+ return false;
+
+ return __i915_gem_request_completed(req);
}
bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -263,7 +288,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
- return (i915_gem_request_started(request) &&
+ return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
}
@@ -552,53 +577,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
}
/**
- * i915_gem_active_is_idle - report whether the active tracker is idle
- * @active - the active tracker
- *
- * i915_gem_active_is_idle() returns true if the active tracker is currently
- * unassigned or if the request is complete (but not yet retired). Requires
- * the caller to hold struct_mutex (but that can be relaxed if desired).
- */
-static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return !i915_gem_active_peek(active, mutex);
-}
-
-/**
* i915_gem_active_wait - waits until the request is completed
* @active - the active request on which to wait
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning. Note that it does not guarantee that the request is
- * retired first, see i915_gem_active_retire().
- *
- * i915_gem_active_wait() returns immediately if the active
- * request is already complete.
- */
-static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
-{
- struct drm_i915_gem_request *request;
-
- request = i915_gem_active_peek(active, mutex);
- if (!request)
- return 0;
-
- return i915_wait_request(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
-}
-
-/**
- * i915_gem_active_wait_unlocked - waits until the request is completed
- * @active - the active request on which to wait
* @flags - how to wait
* @timeout - how long to wait at most
* @rps - userspace client to charge for a waitboost
*
- * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * i915_gem_active_wait() waits until the request is completed before
* returning, without requiring any locks to be held. Note that it does not
* retire any requests before returning.
*
@@ -614,21 +599,18 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
* Returns 0 if successful, or a negative error code.
*/
static inline int
-i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
- unsigned int flags,
- s64 *timeout,
- struct intel_rps_client *rps)
+i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
struct drm_i915_gem_request *request;
- int ret = 0;
+ long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, timeout, rps);
+ ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
i915_gem_request_put(request);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
/**
@@ -645,7 +627,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
struct drm_i915_gem_request *request;
- int ret;
+ long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
@@ -653,8 +635,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
- if (ret)
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
return ret;
list_del_init(&active->link);
@@ -665,24 +647,6 @@ i915_gem_active_retire(struct i915_gem_active *active,
return 0;
}
-/* Convenience functions for peeking at state inside active's request whilst
- * guarded by the struct_mutex.
- */
-
-static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
-}
-
-static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active,
- struct mutex *mutex)
-{
- return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
-}
-
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index de25b6e..a6fc1bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ *unlock = false;
+ } else {
+ *unlock = true;
+ }
+
+ return true;
+}
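
The contract of the helper: a false return means another task holds struct_mutex and reclaim must be skipped entirely, while on true *unlock records whether the mutex was acquired here or was already held by the current task (the direct-reclaim recursion case). Both shrinker callbacks follow the same pattern:

bool unlock;

if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
	return 0; /* contended: better to skip than to deadlock */

/* ... scan and reclaim while struct_mutex is held ... */

if (unlock)
	mutex_unlock(&dev_priv->drm.struct_mutex);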
+
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -66,8 +80,11 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- /* Only shmemfs objects are backed by swap */
- if (!obj->base.filp)
+ if (!obj->mm.pages)
+ return false;
+
+ /* Consider only shrinkable objects. */
+ if (!i915_gem_object_is_shrinkable(obj))
return false;
/* Only report true if by unbinding the object and putting its pages
@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (obj->pages_pin_count > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
if (any_vma_pinned(obj))
@@ -88,7 +105,14 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
- return swap_available() || obj->madv == I915_MADV_DONTNEED;
+ return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+}
+
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ return !READ_ONCE(obj->mm.pages);
}
/**
@@ -128,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+ return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -171,15 +199,19 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_list))) {
- list_move_tail(&obj->global_list, &still_in_list);
+ global_link))) {
+ list_move_tail(&obj->global_link, &still_in_list);
+ if (!obj->mm.pages) {
+ list_del_init(&obj->global_link);
+ continue;
+ }
if (flags & I915_SHRINK_PURGEABLE &&
- obj->madv != I915_MADV_DONTNEED)
+ obj->mm.madv != I915_MADV_DONTNEED)
continue;
if (flags & I915_SHRINK_VMAPS &&
- !is_vmalloc_addr(obj->mapping))
+ !is_vmalloc_addr(obj->mm.mapping))
continue;
if (!(flags & I915_SHRINK_ACTIVE) &&
@@ -190,22 +222,28 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj))
continue;
- i915_gem_object_get(obj);
-
- /* For the unbound phase, this should be a no-op! */
- i915_gem_object_unbind(obj);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
-
- i915_gem_object_put(obj);
+ if (unsafe_drop_pages(obj)) {
+ /* May arrive from get_pages on another bo */
+ mutex_lock_nested(&obj->mm.lock,
+ I915_MM_SHRINKER);
+ if (!obj->mm.pages) {
+ __i915_gem_object_invalidate(obj);
+ list_del_init(&obj->global_link);
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+ mutex_unlock(&obj->mm.lock);
+ }
}
- list_splice(&still_in_list, phase->list);
+ list_splice_tail(&still_in_list, phase->list);
}
if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
+ if (unlock)
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
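
The mutex_lock_nested() call in the hunk above is a lockdep annotation, not a different lock: the shrinker may be entered from another object's get_pages (via direct reclaim) with that object's obj->mm.lock already held, and two locks of the same class would otherwise be flagged as a deadlock. A hedged sketch of the idiom; the subclass values here are illustrative, with I915_MM_SHRINKER assumed to be such an enum in the real driver:

enum { SKETCH_MM_NORMAL, SKETCH_MM_SHRINKER };

static void drop_pages_from_shrinker(struct mutex *mm_lock)
{
	/* Tell lockdep this acquisition is a distinct nesting level */
	mutex_lock_nested(mm_lock, SKETCH_MM_SHRINKER);
	/* ... release the backing pages if still unused ... */
	mutex_unlock(mm_lock);
}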
@@ -239,19 +277,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed;
}
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
-
- *unlock = false;
- } else
- *unlock = true;
-
- return true;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -268,11 +293,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_retire_requests(dev_priv);
count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -373,13 +398,19 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ if (!obj->mm.pages)
+ continue;
+
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f4f6d3a..b1d367d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -109,7 +109,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*
*/
base = 0;
- if (INTEL_INFO(dev)->gen >= 3) {
+ if (INTEL_GEN(dev_priv) >= 3) {
u32 bsm;
pci_read_config_dword(pdev, INTEL_BSM, &bsm);
@@ -138,7 +138,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I865_TOUD, &toud);
base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev)) {
+ } else if (IS_I85X(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -546,25 +546,29 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- BUG();
- return -EINVAL;
+ return i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
}
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
/* Should only be called during free */
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
-
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ __i915_gem_object_unpin_pages(obj);
+
if (obj->stolen) {
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
@@ -590,20 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
- obj->pages = i915_pages_create_for_stolen(dev,
- stolen->start, stolen->size);
- if (obj->pages == NULL)
- goto cleanup;
-
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
-
- i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
-
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ if (i915_gem_object_pin_pages(obj))
+ goto cleanup;
+
return obj;
cleanup:
@@ -698,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_pages;
}
/* To simplify the initialisation sequence between KMS and GTT,
@@ -715,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- goto err;
+ goto err_pages;
}
- vma->pages = obj->pages;
+ vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
obj->bind_count++;
- list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- i915_gem_object_pin_pages(obj);
-
return obj;
+err_pages:
+ i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c21bc00..251d51b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -201,12 +201,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
@@ -261,14 +259,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) {
struct i915_vma *vma;
- if (obj->pages &&
- obj->madv == I915_MADV_WILLNEED &&
+ mutex_lock(&obj->mm.lock);
+ if (obj->mm.pages &&
+ obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (args->tiling_mode == I915_TILING_NONE)
- i915_gem_object_unpin_pages(obj);
- if (!i915_gem_object_is_tiled(obj))
- i915_gem_object_pin_pages(obj);
+ if (args->tiling_mode == I915_TILING_NONE) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (!i915_gem_object_is_tiled(obj)) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
}
+ mutex_unlock(&obj->mm.lock);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!vma->fence)
@@ -302,8 +308,6 @@ err:
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
return err;
}
@@ -327,12 +331,19 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (obj) {
+ args->tiling_mode =
+ READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+ err = 0;
+ }
+ rcu_read_unlock();
+ if (unlikely(err))
+ return err;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
switch (args->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -340,11 +351,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
+ default:
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
- default:
- DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
@@ -357,6 +367,5 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- i915_gem_object_put_unlocked(obj);
return 0;
}
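
The rewritten get_tiling ioctl never takes a reference at all: the handle is resolved under rcu_read_lock() and a single word is sampled with READ_ONCE(). A condensed sketch of that pattern as used above; the object pointer is only valid inside the RCU read-side section:

static int sample_tiling_rcu(struct drm_file *file, u32 handle, u32 *out)
{
	struct drm_i915_gem_object *obj;
	int err = -ENOENT;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle); /* unreferenced */
	if (obj) {
		*out = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
		err = 0;
	}
	rcu_read_unlock();
	/* obj may be freed as soon as the RCU section ends */
	return err;
}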
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_timeline.c
index 9131555..fc8f13a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Intel Corporation
+ * Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -17,29 +17,49 @@
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*
*/
-#ifndef _I915_GEM_DMABUF_H_
-#define _I915_GEM_DMABUF_H_
+#include "i915_drv.h"
-#include <linux/dma-buf.h>
-
-static inline struct reservation_object *
-i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *timeline,
+ const char *name)
{
- struct dma_buf *dma_buf;
+ unsigned int i;
+ u64 fences;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ timeline->i915 = i915;
+ timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
+ if (!timeline->name)
+ return -ENOMEM;
+
+ list_add(&timeline->link, &i915->gt.timelines);
+
+ /* Called during early_init before we know how many engines there are */
+ fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
- if (obj->base.dma_buf)
- dma_buf = obj->base.dma_buf;
- else if (obj->base.import_attach)
- dma_buf = obj->base.import_attach->dmabuf;
- else
- return NULL;
+ tl->fence_context = fences++;
+ tl->common = timeline;
- return dma_buf->resv;
+ spin_lock_init(&tl->lock);
+ init_request_active(&tl->last_request, NULL);
+ INIT_LIST_HEAD(&tl->requests);
+ }
+
+ return 0;
}
-#endif
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+{
+ lockdep_assert_held(&tl->i915->drm.struct_mutex);
+
+ list_del(&tl->link);
+ kfree(tl->name);
+}
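
Each per-engine slot of the new timeline receives its own dma-fence context, carved from one contiguous block. A minimal sketch of that step, using only dma_fence_context_alloc() from <linux/dma-fence.h>:

#include <linux/dma-fence.h>

/* Reserve n consecutive fence context ids and hand one to each
 * engine, as i915_gem_timeline_init() does above.
 */
static void assign_fence_contexts(u64 *contexts, unsigned int n)
{
	u64 first = dma_fence_context_alloc(n); /* first of n ids */
	unsigned int i;

	for (i = 0; i < n; i++)
		contexts[i] = first + i;
}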
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
new file mode 100644
index 0000000..f2bf7b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_TIMELINE_H
+#define I915_GEM_TIMELINE_H
+
+#include <linux/list.h>
+
+#include "i915_gem_request.h"
+
+struct i915_gem_timeline;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 last_submitted_seqno;
+
+ spinlock_t lock;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request; users must carefully acquire a reference to
+ * the request using i915_gem_active_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_gem_active last_request;
+ u32 sync_seqno[I915_NUM_ENGINES];
+
+ struct i915_gem_timeline *common;
+};
+
+struct i915_gem_timeline {
+ struct list_head link;
+ atomic_t next_seqno;
+
+ struct drm_i915_private *i915;
+ const char *name;
+
+ struct intel_timeline engine[I915_NUM_ENGINES];
+};
+
+int i915_gem_timeline_init(struct drm_i915_private *i915,
+ struct i915_gem_timeline *tl,
+ const char *name);
+void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c6f780f..6426163 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -61,33 +61,26 @@ struct i915_mmu_object {
bool attached;
};
-static void wait_rendering(struct drm_i915_gem_object *obj)
-{
- unsigned long active = __I915_BO_ACTIVE(obj);
- int idx;
-
- for_each_active(active, idx)
- i915_gem_active_wait_unlocked(&obj->last_read[idx],
- 0, NULL, NULL);
-}
-
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
- wait_rendering(obj);
+ i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
- if (obj->pages != NULL) {
- /* We are inside a kthread context and can't be interrupted */
- WARN_ON(i915_gem_object_unbind(obj));
- WARN_ON(i915_gem_object_put_pages(obj));
- }
+ /* We are inside a kthread context and can't be interrupted */
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ WARN_ONCE(obj->mm.pages,
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ obj->bind_count,
+ atomic_read(&obj->mm.pages_pin_count),
+ obj->pin_display);
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
@@ -436,24 +429,25 @@ err:
return ret;
}
-static int
+static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
+ struct sg_table *pages;
int ret;
- ret = st_set_pages(&obj->pages, pvec, num_pages);
+ ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) {
- sg_free_table(obj->pages);
- kfree(obj->pages);
- obj->pages = NULL;
+ sg_free_table(pages);
+ kfree(pages);
+ return ERR_PTR(ret);
}
- return ret;
+ return pages;
}
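
The backend now returns the sg_table itself, with failures encoded via ERR_PTR() instead of a separate int; callers test with IS_ERR() and decode with PTR_ERR() or re-wrap with ERR_CAST(), as the hunks below show. A self-contained sketch of the idiom:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *alloc_table_or_err(void)
{
	struct sg_table *st;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM); /* errno encoded in the pointer */

	return st;
}

static int use_table(void)
{
	struct sg_table *st = alloc_table_or_err();

	if (IS_ERR(st))
		return PTR_ERR(st); /* decode back to -ENOMEM */

	kfree(st);
	return 0;
}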
static int
@@ -497,7 +491,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- struct drm_device *dev = obj->base.dev;
const int npages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
@@ -533,33 +526,32 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&obj->mm.lock);
if (obj->userptr.work == &work->work) {
+ struct sg_table *pages = ERR_PTR(ret);
+
if (pinned == npages) {
- ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
- if (ret == 0) {
- list_add_tail(&obj->global_list,
- &to_i915(dev)->mm.unbound_list);
- obj->get_page.sg = obj->pages->sgl;
- obj->get_page.last = 0;
+ pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ if (!IS_ERR(pages)) {
+ __i915_gem_object_set_pages(obj, pages);
pinned = 0;
+ pages = NULL;
}
}
- obj->userptr.work = ERR_PTR(ret);
- }
- obj->userptr.workers--;
- i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
+ obj->userptr.work = ERR_CAST(pages);
+ }
+ mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
+ i915_gem_object_put(obj);
put_task_struct(work->task);
kfree(work);
}
-static int
+static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
{
@@ -584,15 +576,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
- if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
- return -EAGAIN;
-
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work;
- obj->userptr.workers++;
work->obj = i915_gem_object_get(obj);
@@ -603,14 +591,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
schedule_work(&work->work);
*active = true;
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
-static int
+static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ struct sg_table *pages;
int pinned, ret;
bool active;
@@ -634,15 +623,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
+ return ERR_CAST(obj->userptr.work);
else
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
@@ -651,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -660,21 +649,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
active = false;
if (pinned < 0)
- ret = pinned, pinned = 0;
+ pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
- ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+ pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
- ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
- if (ret) {
+ pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
}
drm_free_large(pvec);
- return ret;
+ return pages;
}
static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
@@ -682,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
- if (obj->madv != I915_MADV_WILLNEED)
- obj->dirty = 0;
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ obj->mm.dirty = false;
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->pages) {
- if (obj->dirty)
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
}
- obj->dirty = 0;
+ obj->mm.dirty = false;
- sg_free_table(obj->pages);
- kfree(obj->pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
@@ -717,7 +707,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
@@ -816,7 +807,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 242b9a9..204093f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -415,17 +415,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[0],
- ee->semaphore_seqno[0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[1],
- ee->semaphore_seqno[1]);
- if (HAS_VEBOX(m->i915)) {
- err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- ee->semaphore_mboxes[2],
- ee->semaphore_seqno[2]);
- }
+ err_printf(m, " SYNC_0: 0x%08x\n",
+ ee->semaphore_mboxes[0]);
+ err_printf(m, " SYNC_1: 0x%08x\n",
+ ee->semaphore_mboxes[1]);
+ if (HAS_VEBOX(m->i915))
+ err_printf(m, " SYNC_2: 0x%08x\n",
+ ee->semaphore_mboxes[2]);
}
if (USES_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -546,9 +542,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Time: %ld s %ld us\n",
+ error->time.tv_sec, error->time.tv_usec);
+ err_printf(m, "Boottime: %ld s %ld us\n",
+ error->boottime.tv_sec, error->boottime.tv_usec);
+ err_printf(m, "Uptime: %ld s %ld us\n",
+ error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -702,6 +702,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "Semaphores", error->semaphore);
+ print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
+
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -782,6 +784,7 @@ static void i915_error_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore);
+ i915_error_object_free(error->guc_log);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
@@ -880,17 +883,17 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
- err->wseqno = __active_get_seqno(&obj->last_write);
- err->engine = __active_get_engine_id(&obj->last_write);
+ err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
+ err->wseqno = __active_get_seqno(&vma->last_write);
+ err->engine = __active_get_engine_id(&vma->last_write);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->dirty = obj->mm.dirty;
+ err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level;
}
@@ -965,6 +968,26 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
}
}
+static inline u32
+gen8_engine_sync_index(struct intel_engine_cs *engine,
+ struct intel_engine_cs *other)
+{
+ int idx;
+
+ /*
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
+ */
+
+ idx = (other - engine) - 1;
+ if (idx < 0)
+ idx += I915_NUM_ENGINES;
+
+ return idx;
+}
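
The pointer subtraction above reduces to modular arithmetic on engine indices. A worked example, assuming the conventional order rcs=0, vcs=1, bcs=2, vecs=3, vcs2=4 (I915_NUM_ENGINES == 5):

/* engine = bcs (2), other = rcs (0):
 *   idx = (0 - 2) - 1 = -3  ->  -3 + 5 = 2
 * which matches the "bcs -> 2 = rcs" row of the table above.
 */
static inline int sync_index_sketch(int engine, int other, int num_engines)
{
	int idx = (other - engine) - 1;

	if (idx < 0)
		idx += num_engines;
	return idx;
}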
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
@@ -988,10 +1011,9 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore->pages[0];
- idx = intel_engine_sync_index(engine, to);
+ idx = gen8_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
- ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
@@ -1002,14 +1024,9 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
- ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
-
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_VEBOX(dev_priv))
ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
- ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
- }
}
static void error_record_engine_waiters(struct intel_engine_cs *engine,
@@ -1026,7 +1043,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@@ -1034,7 +1051,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
waiter = NULL;
if (count)
@@ -1044,7 +1061,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
- if (!spin_trylock(&b->lock)) {
+ if (!spin_trylock_irq(&b->lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@@ -1062,7 +1079,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
@@ -1103,7 +1120,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = engine->last_submitted_seqno;
+ ee->last_seqno = intel_engine_last_submit(engine);
ee->start = I915_READ_START(engine);
ee->head = I915_READ_HEAD(engine);
ee->tail = I915_READ_TAIL(engine);
@@ -1169,7 +1186,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
- erq->seqno = request->fence.seqno;
+ erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
erq->tail = request->tail;
@@ -1188,7 +1205,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link)
+ list_for_each_entry_from(request, &engine->timeline->requests, link)
count++;
if (!count)
return;
@@ -1201,7 +1218,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->request_list, link) {
+ list_for_each_entry_from(request, &engine->timeline->requests, link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1408,6 +1425,17 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
+static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ /* Capturing log buf contents won't be useful if logging was disabled */
+ if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+ return;
+
+ error->guc_log = i915_error_object_create(dev_priv,
+ dev_priv->guc.log.vma);
+}
+
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
@@ -1532,8 +1560,13 @@ static int capture(void *data)
i915_gem_record_rings(error->i915, error);
i915_capture_active_buffers(error->i915, error);
i915_capture_pinned_buffers(error->i915, error);
+ i915_gem_capture_guc_log_buffer(error->i915, error);
do_gettimeofday(&error->time);
+ error->boottime = ktime_to_timeval(ktime_get_boottime());
+ error->uptime =
+ ktime_to_timeval(ktime_sub(ktime_get(),
+ error->i915->gt.last_init_time));
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index a1f76c8..666dab7 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -23,6 +23,8 @@
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
#include "i915_drv.h"
#include "intel_guc.h"
@@ -85,6 +87,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
+ mutex_lock(&guc->action_lock);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dev_priv->guc.action_count += 1;
@@ -123,6 +126,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
dev_priv->guc.action_status = status;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&guc->action_lock);
return ret;
}
@@ -170,6 +174,35 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
+{
+ u32 data[1];
+
+ data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
+
+ return host2guc_action(guc, data, 1);
+}
+
+static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
+ data[1] = 0;
+
+ return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+{
+ u32 data[2];
+
+ data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
+ data[1] = control_val;
+
+ return host2guc_action(guc, data, 2);
+}
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@@ -187,7 +220,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
struct guc_context_desc desc;
size_t len;
- doorbell = client->client_base + client->doorbell_offset;
+ doorbell = client->vaddr + client->doorbell_offset;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
test_bit(client->doorbell_id, doorbell_bitmap)) {
@@ -293,7 +326,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
- desc = client->client_base + client->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@@ -380,8 +413,8 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
gfx_addr = i915_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu = (uintptr_t)client->client_base +
- client->doorbell_offset;
+ desc.db_trigger_cpu =
+ (uintptr_t)client->vaddr + client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
@@ -432,7 +465,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
+ struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
u32 freespace;
int ret;
@@ -473,10 +506,9 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct intel_engine_cs *engine = rq->engine;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
- void *base;
- u32 freespace, tail, wq_off, wq_page;
+ u32 freespace, tail, wq_off;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
@@ -506,10 +538,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
gc->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
- wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
- wq_off &= PAGE_SIZE - 1;
- base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
- wqi = (struct guc_wq_item *)((char *)base + wq_off);
+ wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@@ -521,9 +550,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->fence.seqno;
-
- kunmap_atomic(base);
+ wqi->fence_id = rq->global_seqno;
}
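
With the client object now permanently mapped through i915_gem_object_pin_map(), locating a work-queue item becomes plain pointer arithmetic instead of a per-page kmap_atomic round trip. The before/after, reconstructed from the lines removed above:

/* Before: map the page containing the item, offset into it, unmap.
 *	wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
 *	wq_off &= PAGE_SIZE - 1;
 *	base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
 *	wqi = (struct guc_wq_item *)((char *)base + wq_off);
 *	...
 *	kunmap_atomic(base);
 *
 * After: one contiguous mapping held for the client's lifetime.
 *	wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
 */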
static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -533,7 +560,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = gc->client_base + gc->proc_desc_offset;
+ desc = gc->vaddr + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@@ -549,7 +576,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = gc->client_base + gc->doorbell_offset;
+ db = gc->vaddr + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -601,6 +628,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
*/
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
+ struct drm_i915_private *dev_priv = rq->i915;
unsigned int engine_id = rq->engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
@@ -608,6 +636,11 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
+
+ /* WA to flush out the pending GMADR writes to ring buffer. */
+ if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+ POSTING_READ_FW(GUC_STATUS);
+
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -616,7 +649,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
client->b_fail += 1;
guc->submissions[engine_id] += 1;
- guc->last_seqno[engine_id] = rq->fence.seqno;
+ guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock(&client->wq_lock);
}
@@ -685,14 +718,14 @@ guc_client_free(struct drm_i915_private *dev_priv,
* Be sure to drop any locks
*/
- if (client->client_base) {
+ if (client->vaddr) {
/*
* If we got as far as setting up a doorbell, make sure we
* shut it down before unmapping & deallocating the memory.
*/
guc_disable_doorbell(guc, client);
- kunmap(kmap_to_page(client->client_base));
+ i915_gem_object_unpin_map(client->vma->obj);
}
i915_vma_unpin_and_release(&client->vma);
@@ -781,6 +814,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct i915_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -807,7 +841,12 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
- client->client_base = kmap(i915_vma_first_page(vma));
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ goto err;
+
+ client->vaddr = vaddr;
spin_lock_init(&client->wq_lock);
client->wq_offset = GUC_DB_SIZE;
@@ -847,15 +886,411 @@ err:
return NULL;
}
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /* Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between Consumer
+ * and Producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data; the producer could be writing to the
+ * same sub buffer from which the Consumer is reading. This can't be
+ * avoided unless the Consumer is fast enough and can always run in
+ * tandem with the Producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /* This is to enable the use of a single buffer for the relay channel and
+ * correspondingly have a single file exposed to User, through which
+ * it can collect the logs in order without any post-processing.
+ * Need to set 'is_global' even if parent is NULL for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ /* Not using the channel filename passed as an argument, since for each
+ * channel relay appends the corresponding CPU number to the filename
+ * passed in relay_open(). This should be fine as relay just needs a
+ * dentry of the file associated with the channel buffer and that file's
+ * name need not be the same as the filename passed as an argument.
+ */
+ buf_file = debugfs_create_file("guc_log", mode,
+ parent, buf, &relay_file_operations);
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+ relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+
+ /* Keep the size of sub buffers the same as the shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
+
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to the User, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ return -ENOMEM;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.relay_chan = guc_log_relay_chan;
+ return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct dentry *log_dir;
+ int ret;
+
+ /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+ log_dir = dev_priv->drm.primary->debugfs_root;
+
+ /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
+ * is not mounted and so the relay file can't be created.
+ * The relay API seems to fit well with debugfs only; to make use of
+ * relay there are 3 requirements, which only a debugfs file can meet
+ * in a straightforward/clean manner :-
+ * i) Need the associated dentry pointer of the file, while opening the
+ * relay channel.
+ * ii) Should be able to use 'relay_file_operations' fops for the file.
+ * iii) Set the 'i_private' field of file's inode to the pointer of
+ * relay channel buffer.
+ */
+ if (!log_dir) {
+ DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+ return -ENODEV;
+ }
+
+ ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+ if (ret) {
+ DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+ /* Make sure the updates made in the sub buffer are visible when
+ * the Consumer sees the following update to the offset inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+ /* Switch to the next sub buffer */
+ relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+ if (!guc->log.relay_chan)
+ return NULL;
+
+ /* Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode, if all sub
+ * buffers are full. Could have used relay_write() to indirectly
+ * copy the data, but that would have been a bit convoluted, as we need
+ * to write to only certain locations inside a sub buffer, which cannot
+ * be done without using relay_reserve() along with relay_write(). So it's
+ * better to use relay_reserve() alone.
+ */
+ return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool
+guc_check_log_buf_overflow(struct intel_guc *guc,
+ enum guc_log_buffer_type type, unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ guc->log.prev_overflow_count[type] = full_cnt;
+ guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ guc->log.total_overflow_count[type] += 16;
+ }
+ DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
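
buffer_full_cnt is only a 4-bit firmware counter, hence the +16 correction above whenever a sample is smaller than the previous one. The unsigned arithmetic works out even across the wrap:

/* prev = 15, now = 2: (2 - 15) wraps modulo 2^32, and adding 16
 * brings the delta back to the true value, 3.
 */
static unsigned int overflow_delta(unsigned int prev, unsigned int now)
{
	unsigned int delta = now - prev; /* modulo-2^32 difference */

	if (now < prev)
		delta += 16; /* the 4-bit counter wrapped */
	return delta;
}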
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ case GUC_DPC_LOG_BUFFER:
+ return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ if (WARN_ON(!guc->log.buf_addr))
+ return;
+
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = guc->log.buf_addr;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* Make a copy of the state structure, inside the GuC log buffer
+ * (which is mapped uncached), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = guc_get_log_buffer_size(type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ if (unlikely(!log_buf_snapshot_state))
+ continue;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /* The write pointer could have been updated by the GuC firmware,
+ * after sending the flush interrupt to the Host; for consistency,
+ * set the write pointer to the same value as sampled_write_ptr in
+ * the snapshot buffer.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ if (log_buf_snapshot_state) {
+ guc_move_to_next_buf(guc);
+ } else {
+ /* Rate limited to avoid a deluge of messages; the logs might be
+ * getting consumed by the User at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ }
+}
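
Each log section is treated as a circular buffer: when the read offset is ahead of the write offset, the fresh data wraps past the end and must be copied in two pieces. A minimal sketch of that wrap-aware copy, with plain memcpy() standing in for i915_memcpy_from_wc():

static void copy_new_log_data(void *dst, const void *src,
			      unsigned int read, unsigned int write,
			      unsigned int size)
{
	if (read > write) {
		/* Wrapped: copy the head [0, write) first ... */
		memcpy(dst, src, write);
		/* ... then let the common path copy the tail [read, size) */
		write = size;
	}
	memcpy(dst + read, src + read, write - read);
}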
+
+static void guc_capture_logs_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, guc.log.flush_work);
+
+ i915_guc_capture_logs(dev_priv);
+}
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* First disable the flush interrupt */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ if (guc->log.flush_wq)
+ destroy_workqueue(guc->log.flush_wq);
+
+ guc->log.flush_wq = NULL;
+
+ if (guc->log.relay_chan)
+ guc_log_remove_relay_file(guc);
+
+ guc->log.relay_chan = NULL;
+
+ if (guc->log.buf_addr)
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+
+ guc->log.buf_addr = NULL;
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ void *vaddr;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Nothing to do */
+ if (i915.guc_log_level < 0)
+ return 0;
+
+ if (!guc->log.buf_addr) {
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return ret;
+ }
+
+ guc->log.buf_addr = vaddr;
+ }
+
+ if (!guc->log.relay_chan) {
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs; the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ ret = guc_log_create_relay_channel(guc);
+ if (ret)
+ return ret;
+ }
+
+ if (!guc->log.flush_wq) {
+ INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
+
+ /*
+ * The GuC log buffer flush work item has to do register access to
+ * send the ack to GuC, and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother about
+ * flushing this work item from the suspend hooks; the pending
+ * work item, if any, will either be executed before the suspend
+ * or scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (guc->log.flush_wq == NULL) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
static void guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
- if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
- return;
-
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
@@ -865,8 +1300,18 @@ static void guc_log_create(struct intel_guc *guc)
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
- vma = guc->log_vma;
+ vma = guc->log.vma;
if (!vma) {
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
+ /* logging will not be enabled */
+ i915.guc_log_level = -1;
+ return;
+ }
+
vma = guc_allocate_vma(guc, size);
if (IS_ERR(vma)) {
/* logging will be off */
@@ -874,7 +1319,14 @@ static void guc_log_create(struct intel_guc *guc)
return;
}
- guc->log_vma = vma;
+ guc->log.vma = vma;
+
+ if (guc_log_create_extras(guc)) {
+ guc_log_cleanup(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+ i915.guc_log_level = -1;
+ return;
+ }
}
/* each allocated unit is a page */
@@ -884,7 +1336,37 @@ static void guc_log_create(struct intel_guc *guc)
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (i915.guc_log_level < 0)
+ return -EINVAL;
+
+ /* If log_level was set as -1 at boot time, then the setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_create_extras(guc);
+ if (ret)
+ goto err;
+
+ ret = guc_log_create_relay_file(guc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ guc_log_cleanup(guc);
+ /* logging will remain off */
+ i915.guc_log_level = -1;
+ return ret;
}
static void guc_policies_init(struct guc_policies *policies)
@@ -1006,6 +1488,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
+ mutex_init(&guc->action_lock);
guc_log_create(guc);
guc_addon_create(guc);
@@ -1039,7 +1522,8 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
engine->submit_request = i915_guc_submit;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request, &engine->request_list, link) {
+ list_for_each_entry(request,
+ &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
if (i915_sw_fence_done(&request->submit))
i915_guc_submit(request);
@@ -1068,7 +1552,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log_vma);
+ i915_vma_unpin_and_release(&guc->log.vma);
if (guc->ctx_pool_vma)
ida_destroy(&guc->ctx_ids);
@@ -1089,6 +1573,8 @@ int intel_guc_suspend(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ gen9_disable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
@@ -1115,6 +1601,9 @@ int intel_guc_resume(struct drm_device *dev)
if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
@@ -1124,3 +1613,104 @@ int intel_guc_resume(struct drm_device *dev)
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
+
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
+{
+ guc_read_update_log_buffer(&dev_priv->guc);
+
+ /* Generally the device is expected to be active only at this
+ * time, so get/put should be really quick.
+ */
+ intel_runtime_pm_get(dev_priv);
+ host2guc_logbuffer_flush_complete(&dev_priv->guc);
+ intel_runtime_pm_put(dev_priv);
+}
+
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ return;
+
+ /* First disable the interrupts; they will be re-enabled afterwards */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ /* Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete; otherwise the forceful flush may not actually happen.
+ */
+ flush_work(&dev_priv->guc.log.flush_work);
+
+ /* Ask GuC to update the log buffer state */
+ host2guc_force_logbuffer_flush(&dev_priv->guc);
+
+ /* GuC would have updated log buffer by now, so capture it */
+ i915_guc_capture_logs(dev_priv);
+}
+
+void i915_guc_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_cleanup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_register(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+ union guc_log_control log_param;
+ int ret;
+
+ log_param.value = control_val;
+
+ if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+ log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ return -EINVAL;
+
+ /* This combination doesn't make sense & won't have any effect */
+ if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ return 0;
+
+ ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
+ return ret;
+ }
+
+ i915.guc_log_level = log_param.verbosity;
+
+	/* If log_level was set to -1 at boot time, then the relay channel file
+	 * won't have been created by now and interrupts will not have been
+	 * enabled either.
+	 */
+ if (!dev_priv->guc.log.relay_chan) {
+ ret = guc_log_late_setup(&dev_priv->guc);
+ if (!ret)
+ gen9_enable_guc_interrupts(dev_priv);
+ } else if (!log_param.logging_enabled) {
+		/* Once logging is disabled, GuC won't generate logs or raise an
+		 * interrupt. But there could be some data in the log buffer
+		 * which is yet to be captured. So request GuC to update the log
+		 * buffer state and then collect the leftover logs.
+		 */
+ i915_guc_flush_logs(dev_priv);
+
+ /* As logging is disabled, update log level to reflect that */
+ i915.guc_log_level = -1;
+ } else {
+ /* In case interrupts were disabled, enable them now */
+ gen9_enable_guc_interrupts(dev_priv);
+ }
+
+ return ret;
+}
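i915_guc_log_control() treats control_val as an overlay of bit-fields. The union lives in the GuC firmware interface header (intel_guc_fwif.h in the diffstat); a plausible layout, consistent with the logging_enabled and verbosity accesses above but given here only as a sketch:

	union guc_log_control {
		struct {
			u32 logging_enabled:1;
			u32 reserved1:3;
			u32 verbosity:4;
			u32 reserved2:24;
		};
		u32 value;
	} __packed;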
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 23315e5..6d7505b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -170,6 +170,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
@@ -303,18 +304,18 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- new_val = dev_priv->pm_irq_mask;
+ new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_irq_mask) {
- dev_priv->pm_irq_mask = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
+ if (new_val != dev_priv->pm_imr) {
+ dev_priv->pm_imr = new_val;
+ I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -322,28 +323,54 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, mask);
}
-static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t mask)
+static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
- __gen6_disable_pm_irq(dev_priv, mask);
+ __gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(reg, dev_priv->pm_rps_events);
- I915_WRITE(reg, dev_priv->pm_rps_events);
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ I915_WRITE(reg, reset_mask);
+ I915_WRITE(reg, reset_mask);
POSTING_READ(reg);
+}
+
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier |= enable_mask;
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ gen6_unmask_pm_irq(dev_priv, enable_mask);
+ /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
+}
+
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ dev_priv->pm_ier &= ~disable_mask;
+ __gen6_mask_pm_irq(dev_priv, disable_mask);
+ I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+	/* no POSTING_READ here; we don't really need a barrier in this path */
+}
+
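The split between gen6_enable_pm_irq()/gen6_disable_pm_irq() (IER plus the matching IMR update) and gen6_unmask_pm_irq()/gen6_mask_pm_irq() (IMR only) follows the usual two-register scheme: an event reaches the CPU only when it is both enabled and unmasked. As a sketch, not driver code:

	/* Delivery condition for a PM event under the IER/IMR scheme. */
	static inline bool pm_event_delivered(u32 ier, u32 imr, u32 event)
	{
		return (ier & event) && !(imr & event);
	}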
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -357,8 +384,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
- dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -379,9 +404,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
- I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
- ~dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@@ -395,6 +418,38 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts_enabled) {
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
+ dev_priv->pm_guc_events);
+ dev_priv->guc.interrupts_enabled = true;
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts_enabled = false;
+
+ gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen9_reset_guc_interrupts(dev_priv);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -670,8 +725,8 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
htotal = mode->crtc_htotal;
@@ -776,8 +831,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
@@ -912,21 +967,22 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *vblank_time,
unsigned flags)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
- if (pipe >= INTEL_INFO(dev)->num_pipes) {
+ if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* Get drm_crtc to timestamp: */
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
- if (!crtc->hwmode.crtc_clock) {
+ if (!crtc->base.hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
@@ -934,7 +990,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
- &crtc->hwmode);
+ &crtc->base.hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
@@ -1085,7 +1141,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1324,11 +1380,13 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
- if (master_ctl & GEN8_GT_PM_IRQ) {
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & dev_priv->pm_rps_events) {
+ if (gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events)) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & dev_priv->pm_rps_events);
+ gt_iir[2] & (dev_priv->pm_rps_events |
+ dev_priv->pm_guc_events));
ret = IRQ_HANDLED;
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
@@ -1360,6 +1418,9 @@ static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+
+ if (gt_iir[2] & dev_priv->pm_guc_events)
+ gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1586,7 +1647,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&dev_priv->rps.work);
@@ -1606,6 +1667,41 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
+static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+{
+ if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
+		/* Sample the log buffer flush related bits and clear them out
+		 * immediately from the message identity register, to minimize
+		 * the probability of losing a flush interrupt when flush
+		 * interrupts arrive back to back.
+		 * A new flush interrupt, for a different log buffer type (like
+		 * for ISR), can arrive whilst the host is handling one (for
+		 * DPC). Since the same message-register bit is used for ISR
+		 * and DPC, GuC could set the bit for the 2nd interrupt just
+		 * before the host clears it while handling the 1st.
+		 */
+ u32 msg, flush;
+
+ msg = I915_READ(SOFT_SCRATCH(15));
+ flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+ if (flush) {
+ /* Clear the message bits that are handled */
+ I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+ /* Handle flush interrupt in bottom half */
+ queue_work(dev_priv->guc.log.flush_wq,
+ &dev_priv->guc.log.flush_work);
+
+ dev_priv->guc.log.flush_interrupt_count++;
+ } else {
+			/* Leaving unhandled event bits set does not cause the
+			 * interrupt to re-trigger, so they can safely be
+			 * ignored here.
+			 */
+ }
+ }
+}
+
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -2408,7 +2504,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
fault_errors);
}
@@ -2752,420 +2848,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- enum intel_engine_id id;
-
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv, id) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
-
- for_each_engine(signaller, dev_priv, id) {
- if(engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- engine->hangcheck.deadlock = 0;
-}
-
-static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
-{
- u32 tmp = current_instdone | *old_instdone;
- bool unchanged;
-
- unchanged = tmp == *old_instdone;
- *old_instdone |= tmp;
-
- return unchanged;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_instdone instdone;
- struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
- bool stuck;
- int slice;
- int subslice;
-
- if (engine->id != RCS)
- return true;
-
- intel_engine_get_instdone(engine, &instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = instdone_unchanged(instdone.instdone,
- &accu_instdone->instdone);
- stuck &= instdone_unchanged(instdone.slice_common,
- &accu_instdone->slice_common);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
- stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
- &accu_instdone->sampler[slice][subslice]);
- stuck &= instdone_unchanged(instdone.row[slice][subslice],
- &accu_instdone->row[slice][subslice]);
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return HANGCHECK_ACTIVE;
- }
-
- if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
-
- return HANGCHECK_HUNG;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
- return ha;
-
- if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = I915_READ_CTL(engine);
- if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, 0,
- "Kicking stuck wait on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- }
-
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return HANGCHECK_HUNG;
- case 1:
- i915_handle_error(dev_priv, 0,
- "Kicking stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
- case 0:
- return HANGCHECK_WAIT;
- }
- }
-
- return HANGCHECK_HUNG;
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void i915_hangcheck_elapsed(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
-
- if (!i915.enable_hangcheck)
- return;
-
- if (!READ_ONCE(dev_priv->gt.awake))
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
-
- for_each_engine(engine, dev_priv, id) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
-
- semaphore_clear_deadlocks(dev_priv);
-
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = READ_ONCE(engine->last_submitted_seqno);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
- }
-
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
- busy_count += busy;
- }
-
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
-
- /* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
-}
-
static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3545,11 +3227,13 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev))
+ if (HAS_VEBOX(dev_priv)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
- dev_priv->pm_irq_mask = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ dev_priv->pm_imr = 0xffffffff;
+ GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3669,14 +3353,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- dev_priv->pm_irq_mask = 0xffffffff;
+ dev_priv->pm_ier = 0x0;
+ dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
+	 * is enabled/disabled. The same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -4460,6 +4145,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ if (HAS_GUC_SCHED(dev))
+ dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
@@ -4481,9 +4169,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 8)
dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
- INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
-
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 31e6edd..2a41950 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -288,7 +288,8 @@ static const struct intel_device_info intel_haswell_info = {
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS, \
- .has_logical_ring_contexts = 1
+ .has_logical_ring_contexts = 1, \
+ .has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
@@ -308,6 +309,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
+ .has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
@@ -347,6 +349,7 @@ static const struct intel_device_info intel_broxton_info = {
.has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
+ .has_64bit_reloc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 00efaa1..3361d7f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -830,96 +830,7 @@ enum skl_disp_power_wells {
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
-/**
- * DOC: DPIO
- *
- * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
- * ports. DPIO is the name given to such a display PHY. These PHYs
- * don't follow the standard programming model using direct MMIO
- * registers, and instead their registers must be accessed trough IOSF
- * sideband. VLV has one such PHY for driving ports B and C, and CHV
- * adds another PHY for driving port D. Each PHY responds to specific
- * IOSF-SB port.
- *
- * Each display PHY is made up of one or two channels. Each channel
- * houses a common lane part which contains the PLL and other common
- * logic. CH0 common lane also contains the IOSF-SB logic for the
- * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
- * must be running when any DPIO registers are accessed.
- *
- * In addition to having their own registers, the PHYs are also
- * controlled through some dedicated signals from the display
- * controller. These include PLL reference clock enable, PLL enable,
- * and CRI clock selection, for example.
- *
- * Eeach channel also has two splines (also called data lanes), and
- * each spline is made up of one Physical Access Coding Sub-Layer
- * (PCS) block and two TX lanes. So each channel has two PCS blocks
- * and four TX lanes. The TX lanes are used as DP lanes or TMDS
- * data/clock pairs depending on the output type.
- *
- * Additionally the PHY also contains an AUX lane with AUX blocks
- * for each channel. This is used for DP AUX communication, but
- * this fact isn't really relevant for the driver since AUX is
- * controlled from the display controller side. No DPIO registers
- * need to be accessed during AUX communication,
- *
- * Generally on VLV/CHV the common lane corresponds to the pipe and
- * the spline (PCS/TX) corresponds to the port.
- *
- * For dual channel PHY (VLV/CHV):
- *
- * pipe A == CMN/PLL/REF CH0
- *
- * pipe B == CMN/PLL/REF CH1
- *
- * port B == PCS/TX CH0
- *
- * port C == PCS/TX CH1
- *
- * This is especially important when we cross the streams
- * ie. drive port B with pipe B, or port C with pipe A.
- *
- * For single channel PHY (CHV):
- *
- * pipe C == CMN/PLL/REF CH0
- *
- * port D == PCS/TX CH0
- *
- * On BXT the entire PHY channel corresponds to the port. That means
- * the PLL is also now associated with the port rather than the pipe,
- * and so the clock needs to be routed to the appropriate transcoder.
- * Port A PLL is directly connected to transcoder EDP and port B/C
- * PLLs can be routed to any transcoder A/B/C.
- *
- * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT). ::
- *
- *
- * Dual channel PHY (VLV/CHV/BXT)
- * ---------------------------------
- * | CH0 | CH1 |
- * | CMN/PLL/REF | CMN/PLL/REF |
- * |---------------|---------------| Display PHY
- * | PCS01 | PCS23 | PCS01 | PCS23 |
- * |-------|-------|-------|-------|
- * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
- * ---------------------------------
- * | DDI0 | DDI1 | DP/HDMI ports
- * ---------------------------------
- *
- * Single channel PHY (CHV/BXT)
- * -----------------
- * | CH0 |
- * | CMN/PLL/REF |
- * |---------------| Display PHY
- * | PCS01 | PCS23 |
- * |-------|-------|
- * |TX0|TX1|TX2|TX3|
- * -----------------
- * | DDI2 | DP/HDMI port
- * -----------------
- */
+/* DPIO registers */
#define DPIO_DEVFN 0
#define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110)
@@ -1275,7 +1186,19 @@ enum skl_disp_power_wells {
#define DPIO_UPAR_SHIFT 30
/* BXT PHY registers */
-#define _BXT_PHY(phy, a, b) _MMIO_PIPE((phy), (a), (b))
+#define _BXT_PHY0_BASE 0x6C000
+#define _BXT_PHY1_BASE 0x162000
+#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE)
+
+#define _BXT_PHY(phy, reg) \
+ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
+
+#define _BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ (BXT_PHY_BASE(phy) + _PIPE((ch), (reg_ch0) - _BXT_PHY0_BASE, \
+ (reg_ch1) - _BXT_PHY0_BASE))
+#define _MMIO_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1) \
+ _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
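The rework keys the BXT PHY registers on (phy, channel) rather than port, reusing the PHY0 channel-0/1 addresses as offset templates for both PHYs. A worked example with the _PORT_PLL_EBB_4_* addresses defined below, assuming DPIO_PHY0/DPIO_PHY1 and DPIO_CH0/DPIO_CH1 enumerate to 0/1 as elsewhere in the driver:

	/* BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0)
	 *   = 0x162000 + (0x6C038 - 0x6C000) = 0x162038  (old _PORT_PLL_EBB_4_A)
	 * BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1)
	 *   = 0x6C000  + (0x6C344 - 0x6C000) = 0x6C344   (old _PORT_PLL_EBB_4_C)
	 */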
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
@@ -1292,8 +1215,8 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1313,18 +1236,18 @@ enum skl_disp_power_wells {
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
-#define BXT_PORT_PLL_EBB_0(port) _MMIO_PORT3(port, _PORT_PLL_EBB_0_A, \
- _PORT_PLL_EBB_0_B, \
- _PORT_PLL_EBB_0_C)
+#define BXT_PORT_PLL_EBB_0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
-#define BXT_PORT_PLL_EBB_4(port) _MMIO_PORT3(port, _PORT_PLL_EBB_4_A, \
- _PORT_PLL_EBB_4_B, \
- _PORT_PLL_EBB_4_C)
+#define BXT_PORT_PLL_EBB_4(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
#define _PORT_PLL_0_A 0x162100
#define _PORT_PLL_0_B 0x6C100
@@ -1355,57 +1278,56 @@ enum skl_disp_power_wells {
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
-#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
- _PORT_PLL_0_B, \
- _PORT_PLL_0_C)
-#define BXT_PORT_PLL(port, idx) _MMIO(_PORT_PLL_BASE(port) + (idx) * 4)
+#define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(phy, ch, idx) _MMIO(_PORT_PLL_BASE(phy, ch) + \
+ (idx) * 4)
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
-#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
- _PORT_CL1CM_DW0_A)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
- _PORT_CL1CM_DW9_A)
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC)
#define _PORT_CL1CM_DW10_A 0x162028
#define _PORT_CL1CM_DW10_BC 0x6C028
#define IREF1RC_OFFSET_SHIFT 8
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
-#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
- _PORT_CL1CM_DW10_A)
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
#define SUS_CLK_CONFIG 0x3
-#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
- _PORT_CL1CM_DW28_A)
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC)
#define _PORT_CL1CM_DW30_A 0x162078
#define _PORT_CL1CM_DW30_BC 0x6C078
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
-#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
- _PORT_CL1CM_DW30_A)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
-/* Defined for PHY0 only */
-#define BXT_PORT_CL2CM_DW6_BC _MMIO(0x6C358)
+/* The spec defines this only for BXT PHY0, but let's assume that it
+ * would exist for PHY1 too if it had a second channel.
+ */
+#define _PORT_CL2CM_DW6_A 0x162358
+#define _PORT_CL2CM_DW6_BC 0x6C358
+#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
#define GRC_DONE (1 << 22)
-#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
- _PORT_REF_DW3_A)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC)
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
@@ -1416,15 +1338,13 @@ enum skl_disp_power_wells {
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
-#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
- _PORT_REF_DW6_A)
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC)
#define _PORT_REF_DW8_A 0x1621A0
#define _PORT_REF_DW8_BC 0x6C1A0
#define GRC_DIS (1 << 15)
#define GRC_RDY_OVRD (1 << 1)
-#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
- _PORT_REF_DW8_A)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC)
/* BXT PHY PCS registers */
#define _PORT_PCS_DW10_LN01_A 0x162428
@@ -1433,12 +1353,13 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
-#define BXT_PORT_PCS_DW10_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW10_LN01_A, \
- _PORT_PCS_DW10_LN01_B, \
- _PORT_PCS_DW10_LN01_C)
-#define BXT_PORT_PCS_DW10_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW10_GRP_A, \
- _PORT_PCS_DW10_GRP_B, \
- _PORT_PCS_DW10_GRP_C)
+#define BXT_PORT_PCS_DW10_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+
#define TX2_SWING_CALC_INIT (1 << 31)
#define TX1_SWING_CALC_INIT (1 << 30)
@@ -1453,15 +1374,15 @@ enum skl_disp_power_wells {
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
-#define BXT_PORT_PCS_DW12_LN01(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN01_A, \
- _PORT_PCS_DW12_LN01_B, \
- _PORT_PCS_DW12_LN01_C)
-#define BXT_PORT_PCS_DW12_LN23(port) _MMIO_PORT3(port, _PORT_PCS_DW12_LN23_A, \
- _PORT_PCS_DW12_LN23_B, \
- _PORT_PCS_DW12_LN23_C)
-#define BXT_PORT_PCS_DW12_GRP(port) _MMIO_PORT3(port, _PORT_PCS_DW12_GRP_A, \
- _PORT_PCS_DW12_GRP_B, \
- _PORT_PCS_DW12_GRP_C)
+#define BXT_PORT_PCS_DW12_LN01(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
/* BXT PHY TX registers */
#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
@@ -1473,12 +1394,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
-#define BXT_PORT_TX_DW2_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW2_GRP_A, \
- _PORT_TX_DW2_GRP_B, \
- _PORT_TX_DW2_GRP_C)
-#define BXT_PORT_TX_DW2_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW2_LN0_A, \
- _PORT_TX_DW2_LN0_B, \
- _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define BXT_PORT_TX_DW2_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
#define MARGIN_000_SHIFT 16
#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
#define UNIQ_TRANS_SCALE_SHIFT 8
@@ -1490,12 +1411,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
-#define BXT_PORT_TX_DW3_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW3_GRP_A, \
- _PORT_TX_DW3_GRP_B, \
- _PORT_TX_DW3_GRP_C)
-#define BXT_PORT_TX_DW3_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW3_LN0_A, \
- _PORT_TX_DW3_LN0_B, \
- _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define BXT_PORT_TX_DW3_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
#define SCALE_DCOMP_METHOD (1 << 26)
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
@@ -1505,12 +1426,12 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
-#define BXT_PORT_TX_DW4_LN0(port) _MMIO_PORT3(port, _PORT_TX_DW4_LN0_A, \
- _PORT_TX_DW4_LN0_B, \
- _PORT_TX_DW4_LN0_C)
-#define BXT_PORT_TX_DW4_GRP(port) _MMIO_PORT3(port, _PORT_TX_DW4_GRP_A, \
- _PORT_TX_DW4_GRP_B, \
- _PORT_TX_DW4_GRP_C)
+#define BXT_PORT_TX_DW4_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
@@ -1519,10 +1440,10 @@ enum skl_disp_power_wells {
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
-#define BXT_PORT_TX_DW14_LN(port, lane) _MMIO(_PORT3((port), _PORT_TX_DW14_LN0_A, \
- _PORT_TX_DW14_LN0_B, \
- _PORT_TX_DW14_LN0_C) + \
- _BXT_LANE_OFFSET(lane))
+#define BXT_PORT_TX_DW14_LN(phy, ch, lane) \
+ _MMIO(_BXT_PHY_CH(phy, ch, _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C) + \
+ _BXT_LANE_OFFSET(lane))
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 _MMIO(0x4F074)
@@ -2188,8 +2109,9 @@ enum skl_disp_power_wells {
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
-#define FBC_STATUS2 _MMIO(0x43214)
-#define FBC_COMPRESSION_MASK 0x7ff
+#define FBC_STATUS2 _MMIO(0x43214)
+#define IVB_FBC_COMPRESSION_MASK 0x7ff
+#define BDW_FBC_COMPRESSION_MASK 0xfff
#define FBC_LL_SIZE (1536)
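FBC_COMPRESSION_MASK is split per platform because BDW widened the field from 11 to 12 bits. A hedged sketch of a consumer — the function name is hypothetical, and the real user is the FBC status reporting code, not shown in this hunk:

	/* Sketch: select the platform-appropriate mask when reading FBC_STATUS2. */
	static u32 fbc_compressed_segments(struct drm_i915_private *dev_priv)
	{
		u32 mask = INTEL_GEN(dev_priv) >= 8 ? BDW_FBC_COMPRESSION_MASK
						    : IVB_FBC_COMPRESSION_MASK;

		return I915_READ(FBC_STATUS2) & mask;
	}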
@@ -6015,6 +5937,7 @@ enum {
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
#define GEN8_GT_VECS_IRQ (1<<6)
+#define GEN8_GT_GUC_IRQ (1<<5)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
@@ -6026,6 +5949,16 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
+#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31)
+#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30)
+#define GEN9_GUC_DISPLAY_EVENT (1<<29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28)
+#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27)
+#define GEN9_GUC_DB_RING_EVENT (1<<26)
+#define GEN9_GUC_DMA_DONE_EVENT (1<<25)
+#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24)
+#define GEN9_GUC_NOTIFICATION_EVENT (1<<23)
+
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
@@ -7358,6 +7291,13 @@ enum {
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 8185002..95f2f12 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -13,6 +13,8 @@
#include "i915_sw_fence.h"
+#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
+
static DEFINE_SPINLOCK(i915_sw_fence_lock);
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
@@ -135,6 +137,8 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
list_del(&wq->task_list);
__i915_sw_fence_complete(wq->private, key);
i915_sw_fence_put(wq->private);
+ if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
+ kfree(wq);
return 0;
}
@@ -192,9 +196,9 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
return err;
}
-int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
- struct i915_sw_fence *signaler,
- wait_queue_t *wq)
+static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq, gfp_t gfp)
{
unsigned long flags;
int pending;
@@ -206,8 +210,22 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
+ pending = 0;
+ if (!wq) {
+ wq = kmalloc(sizeof(*wq), gfp);
+ if (!wq) {
+ if (!gfpflags_allow_blocking(gfp))
+ return -ENOMEM;
+
+ i915_sw_fence_wait(signaler);
+ return 0;
+ }
+
+ pending |= I915_SW_FENCE_FLAG_ALLOC;
+ }
+
INIT_LIST_HEAD(&wq->task_list);
- wq->flags = 0;
+ wq->flags = pending;
wq->func = i915_sw_fence_wake;
wq->private = i915_sw_fence_get(fence);
@@ -226,6 +244,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return pending;
}
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
+}
+
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ gfp_t gfp)
+{
+ return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
+}
+
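The _gfp variant lets callers chain fences without providing a wait_queue_t of their own: the entry is allocated and later freed via I915_SW_FENCE_FLAG_ALLOC in i915_sw_fence_wake(), and a failed blocking allocation degrades to a synchronous i915_sw_fence_wait(). A usage sketch:

	int err;

	err = i915_sw_fence_await_sw_fence_gfp(fence, signaler, GFP_KERNEL);
	if (err < 0)	/* -ENOMEM only if the gfp flags cannot block */
		return err;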
struct i915_sw_dma_fence_cb {
struct dma_fence_cb base;
struct i915_sw_fence *fence;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index cd239e9..707dfc4 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -46,6 +46,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
wait_queue_t *wq);
+int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
+ struct i915_sw_fence *after,
+ gfp_t gfp);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
unsigned long timeout,
@@ -62,4 +65,9 @@ static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
return atomic_read(&fence->pending) < 0;
}
+static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
+{
+ wait_event(fence->wait, i915_sw_fence_done(fence));
+}
+
#endif /* _I915_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5c912c2..c5d210e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->engine->id;
__entry->sync_to = to->engine->id;
- __entry->seqno = from->fence.seqno;
+ __entry->seqno = from->global_seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,7 +489,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->flags = flags;
dma_fence_enable_sw_signaling(&req->fence);
),
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->fence.seqno;
+ __entry->seqno = req->global_seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
),
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index c762ae5..cb55944 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -84,7 +84,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
- intel_state->wait_req = NULL;
return state;
}
@@ -101,7 +100,6 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state);
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 7093cfb..813fd74 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -57,6 +57,63 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
+/* DP N/M table */
+#define LC_540M 540000
+#define LC_270M 270000
+#define LC_162M 162000
+
+struct dp_aud_n_m {
+ int sample_rate;
+ int clock;
+ u16 m;
+ u16 n;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+ { 32000, LC_162M, 1024, 10125 },
+ { 44100, LC_162M, 784, 5625 },
+ { 48000, LC_162M, 512, 3375 },
+ { 64000, LC_162M, 2048, 10125 },
+ { 88200, LC_162M, 1568, 5625 },
+ { 96000, LC_162M, 1024, 3375 },
+ { 128000, LC_162M, 4096, 10125 },
+ { 176400, LC_162M, 3136, 5625 },
+ { 192000, LC_162M, 2048, 3375 },
+ { 32000, LC_270M, 1024, 16875 },
+ { 44100, LC_270M, 784, 9375 },
+ { 48000, LC_270M, 512, 5625 },
+ { 64000, LC_270M, 2048, 16875 },
+ { 88200, LC_270M, 1568, 9375 },
+ { 96000, LC_270M, 1024, 5625 },
+ { 128000, LC_270M, 4096, 16875 },
+ { 176400, LC_270M, 3136, 9375 },
+ { 192000, LC_270M, 2048, 5625 },
+ { 32000, LC_540M, 1024, 33750 },
+ { 44100, LC_540M, 784, 18750 },
+ { 48000, LC_540M, 512, 11250 },
+ { 64000, LC_540M, 2048, 33750 },
+ { 88200, LC_540M, 1568, 18750 },
+ { 96000, LC_540M, 1024, 11250 },
+ { 128000, LC_540M, 4096, 33750 },
+ { 176400, LC_540M, 3136, 18750 },
+ { 192000, LC_540M, 2048, 11250 },
+};
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+ if (rate == dp_aud_n_m[i].sample_rate &&
+ intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+ return &dp_aud_n_m[i];
+ }
+
+ return NULL;
+}
+
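Every entry in the table satisfies the DP audio relation Maud / Naud = 512 * fs / f_LS_Clk, with the link-symbol clock in kHz as stored in port_clock (e.g. 1024 / 10125 = 512 * 32000 / 162000000). A hypothetical self-check, not part of the driver:

	/* Verify m * clock_kHz * 1000 == n * 512 * sample_rate for all entries. */
	static void dp_aud_n_m_selfcheck(void)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++)
			WARN_ON((u64)dp_aud_n_m[i].m * dp_aud_n_m[i].clock * 1000 !=
				(u64)dp_aud_n_m[i].n * 512 * dp_aud_n_m[i].sample_rate);
	}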
static const struct {
int clock;
u32 config;
@@ -225,16 +282,43 @@ hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct i915_audio_component *acomp = dev_priv->audio_component;
+ int rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
+ if (nm)
+ DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ else
+ DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ if (nm) {
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(nm->n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ }
+
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+ if (nm) {
+ tmp |= nm->m;
+ tmp |= AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ }
+
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
}
static void
@@ -254,19 +338,24 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
- if (adjusted_mode->crtc_clock == TMDS_296M ||
- adjusted_mode->crtc_clock == TMDS_297M) {
- n = audio_config_hdmi_get_n(adjusted_mode, rate);
- if (n != 0) {
- tmp &= ~AUD_CONFIG_N_MASK;
- tmp |= AUD_CONFIG_N(n);
- tmp |= AUD_CONFIG_N_PROG_ENABLE;
- } else {
- DRM_DEBUG_KMS("no suitable N value is found\n");
- }
+ n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ if (n != 0) {
+ DRM_DEBUG_KMS("using N %d\n", n);
+
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ } else {
+ DRM_DEBUG_KMS("using automatic N\n");
}
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
}
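For context on the N programming above: on the HDMI side the sink regenerates the audio clock from the transmitted N and CTS values, related by 128 * fs = f_TMDS * N / CTS. A sketch of the average CTS implied by a chosen N — the helper name is illustrative only:

	static u32 hdmi_average_cts(u32 tmds_clock_hz, u32 n, u32 sample_rate)
	{
		/* 128 * fs = f_TMDS * N / CTS  =>  CTS = f_TMDS * N / (128 * fs) */
		return (u32)div_u64((u64)tmds_clock_hz * n, 128 * sample_rate);
	}

For example, N = 5120 at a 297 MHz TMDS clock and 48 kHz audio gives CTS = 247500, the value the HDMI specification lists for that combination.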
static void
@@ -687,7 +776,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc ||
- intel_encoder->type != INTEL_OUTPUT_HDMI) {
+ (intel_encoder->type != INTEL_OUTPUT_HDMI &&
+ intel_encoder->type != INTEL_OUTPUT_DP)) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 56efcc5..c410d3d 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -83,16 +83,18 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
engine->breadcrumbs.irq_posted = true;
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_enable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
- spin_lock_irq(&engine->i915->irq_lock);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
- spin_unlock_irq(&engine->i915->irq_lock);
+ spin_unlock(&engine->i915->irq_lock);
engine->breadcrumbs.irq_posted = false;
}
@@ -293,9 +295,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
first = __intel_engine_add_wait(engine, wait);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
return first;
}
@@ -326,7 +328,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
if (RB_EMPTY_NODE(&wait->node))
return;
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (RB_EMPTY_NODE(&wait->node))
goto out_unlock;
@@ -400,7 +402,7 @@ out_unlock:
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
@@ -473,14 +475,14 @@ static int intel_breadcrumbs_signaler(void *arg)
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
if (request == b->first_signal) {
struct rb_node *rb =
rb_next(&request->signaling.node);
b->first_signal = rb ? to_signaler(rb) : NULL;
}
rb_erase(&request->signaling.node, &b->signals);
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
i915_gem_request_put(request);
} else {
@@ -502,11 +504,20 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- /* locked by dma_fence_enable_sw_signaling() */
+ /* Note that we may be called from an interrupt handler on another
+ * device (e.g. nouveau signaling a fence completion causing us
+ * to submit a request, and so enable signaling). As such,
+ * we need to make sure that all other users of b->lock protect
+ * against interrupts, i.e. use spin_lock_irqsave.
+ */
+
+ /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
assert_spin_locked(&request->lock);
+ if (!request->global_seqno)
+ return;
request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.seqno = request->fence.seqno;
+ request->signaling.wait.seqno = request->global_seqno;
i915_gem_request_get(request);
spin_lock(&b->lock);
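Because of the scenario described in the comment above, b->lock must now be treated as irq-unsafe to hold with interrupts enabled: a process-context holder that has not disabled interrupts can deadlock against this path running in hard-irq context on the same CPU. A sketch of the rule the conversions in this file now follow:

	/* Any process-context user of b->lock must disable interrupts. */
	static void breadcrumbs_user(struct intel_breadcrumbs *b)
	{
		spin_lock_irq(&b->lock);
		/* ... manipulate b->waiters / b->signals ... */
		spin_unlock_irq(&b->lock);
	}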
@@ -530,8 +541,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
- if (i915_seqno_passed(request->fence.seqno,
- to_signaler(parent)->fence.seqno)) {
+ if (i915_seqno_passed(request->global_seqno,
+ to_signaler(parent)->global_seqno)) {
p = &parent->rb_right;
first = false;
} else {
@@ -592,7 +603,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
- spin_lock(&b->lock);
+ spin_lock_irq(&b->lock);
__intel_breadcrumbs_disable_irq(b);
if (intel_engine_has_waiter(engine)) {
@@ -605,7 +616,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
irq_disable(engine);
}
- spin_unlock(&b->lock);
+ spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a97151f..30eb95b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -573,7 +573,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
POSTING_READ(pipeconf_reg);
	/* Wait for next Vblank to substitute
	 * border color for Color info */
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1ea0e1f..d7a04bc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -168,12 +168,6 @@ struct stepping_info {
char substepping;
};
-static const struct stepping_info kbl_stepping_info[] = {
- {'A', '0'}, {'B', '0'}, {'C', '0'},
- {'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'},
-};
-
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
@@ -194,10 +188,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_KABYLAKE(dev_priv)) {
- size = ARRAY_SIZE(kbl_stepping_info);
- si = kbl_stepping_info;
- } else if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fb18d69..938ac4d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1547,7 +1547,6 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
{
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
- uint32_t val;
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
@@ -1576,38 +1575,11 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
}
}
- /*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers and we pick lanes 0/1 for that.
- */
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
- val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
- ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
- val &= ~SCALE_DCOMP_METHOD;
- if (ddi_translations[level].enable)
- val |= SCALE_DCOMP_METHOD;
-
- if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
-
- I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
-
- val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
- val &= ~DE_EMPHASIS;
- val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
-
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
- val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+ bxt_ddi_phy_set_signal_level(dev_priv, port,
+ ddi_translations[level].margin,
+ ddi_translations[level].scale,
+ ddi_translations[level].enable,
+ ddi_translations[level].deemphasis);
}
static uint32_t translate_signal_level(int signal_levels)
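The deleted read-modify-write sequence is not lost: per the diffstat it moves into intel_dpio_phy.c behind bxt_ddi_phy_set_signal_level(). A sketch of the helper's assumed shape, inferred from the call site above; only the DW2 write is shown, the DW3/DW4/DW10 writes follow the same lane-group pattern:

void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis)
{
	u32 val;

	/* Reads must use a lane register (lane 0 here); the write goes to
	 * the group register so all four lanes are programmed at once. */
	val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
	val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
	I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);

	/* ... DW3 (dcomp scaling enable), DW4 (deemphasis) and the DW10
	 * swing-calc-init toggle continue as in the removed lines ... */
}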
@@ -1923,332 +1895,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
}
}
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- enum port port;
-
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
- return false;
-
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
-
- return false;
- }
-
- if (phy == DPIO_PHY1 &&
- !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
- DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-
- return false;
- }
-
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
-
- return false;
- }
-
- for_each_port_masked(port,
- phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
- BIT(PORT_A)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
- return true;
-}
-
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
-
- return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
-}
-
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- if (intel_wait_for_register(dev_priv,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
-}
-
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- u32 val;
-
- if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
- /* Still read out the GRC value for state verification */
- if (phy == DPIO_PHY0)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
-
- if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
-
- return;
- }
-
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
- }
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val |= GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-
- /*
- * The PHY registers start out inaccessible and respond to reads with
- * all 1s. Eventually they become accessible as they power up, then
- * the reserved bit will give the default 0. Poll on the reserved bit
- * becoming 0 to find when the PHY is accessible.
- * HW team confirmed that the time to reach phypowergood status is
- * anywhere between 50 us and 100us.
- */
- if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
- (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
- DRM_ERROR("timeout during PHY%d power on\n", phy);
- }
-
- /* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
-
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
-
- /* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy == DPIO_PHY0) {
- val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
- }
-
- val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
- val &= ~OCL2_LDOFUSE_PWR_DIS;
- /*
- * On PHY1 disable power on the second channel, since no port is
- * connected there. On PHY0 both channels have a port, so leave it
- * enabled.
- * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
- * power down the second channel on PHY0 as well.
- *
- * FIXME: Clarify programming of the following, the register is
- * read-only with bit 6 fixed at 0 at least in stepping A.
- */
- if (phy == DPIO_PHY1)
- val |= OCL2_LDOFUSE_PWR_DIS;
- I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
-
- if (phy == DPIO_PHY0) {
- uint32_t grc_code;
- /*
- * PHY0 isn't connected to an RCOMP resistor so copy over
- * the corresponding calibrated value from PHY1, and disable
- * the automatic calibration on PHY0.
- */
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
- grc_code = val << GRC_CODE_FAST_SHIFT |
- val << GRC_CODE_SLOW_SHIFT |
- val;
- I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
-
- val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
- val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
- }
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- if (phy == DPIO_PHY1)
- bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-}
-
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
-{
- uint32_t val;
-
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val &= ~GT_DISPLAY_POWER_ON(phy);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
-}
-
-static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- i915_reg_t reg, u32 mask, u32 expected,
- const char *reg_fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- u32 val;
-
- val = I915_READ(reg);
- if ((val & mask) == expected)
- return true;
-
- va_start(args, reg_fmt);
- vaf.fmt = reg_fmt;
- vaf.va = &args;
-
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
- "current %08x, expected %08x (mask %08x)\n",
- phy, &vaf, reg.reg, val, (val & ~mask) | expected,
- mask);
-
- va_end(args);
-
- return false;
-}
-
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- uint32_t mask;
- bool ok;
-
-#define _CHK(reg, mask, exp, fmt, ...) \
- __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
- ## __VA_ARGS__)
-
- if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
- return false;
-
- ok = true;
-
- /* PLL Rcomp code offset */
- ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
- IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW9(%d)", phy);
- ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
- IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
- "BXT_PORT_CL1CM_DW10(%d)", phy);
-
- /* Power gating */
- mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
- ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
- "BXT_PORT_CL1CM_DW28(%d)", phy);
-
- if (phy == DPIO_PHY0)
- ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
- DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
- "BXT_PORT_CL2CM_DW6_BC");
-
- /*
- * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
- * at least on stepping A this bit is read-only and fixed at 0.
- */
-
- if (phy == DPIO_PHY0) {
- u32 grc_code = dev_priv->bxt_phy_grc;
-
- grc_code = grc_code << GRC_CODE_FAST_SHIFT |
- grc_code << GRC_CODE_SLOW_SHIFT |
- grc_code;
- mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
- GRC_CODE_NOM_MASK;
- ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
- "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
-
- mask = GRC_DIS | GRC_RDY_OVRD;
- ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
- "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
- }
-
- return ok;
-#undef _CHK
-}
-
-static uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- switch (pipe_config->lane_count) {
- case 1:
- return 0;
- case 2:
- return BIT(2) | BIT(0);
- case 4:
- return BIT(3) | BIT(2) | BIT(0);
- default:
- MISSING_CASE(pipe_config->lane_count);
-
- return 0;
- }
-}
-
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- int lane;
-
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- /*
- * Note that on CHV this flag is called UPAR, but has
- * the same function.
- */
- val &= ~LATENCY_OPTIM;
- if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
- val |= LATENCY_OPTIM;
-
- I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
- }
-}
-
-static uint8_t
-bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
- int lane;
- uint8_t mask;
-
- mask = 0;
- for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-
- if (val & LATENCY_OPTIM)
- mask |= BIT(lane);
- }
+ uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
- return mask;
+ bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2417,7 +2071,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
- pipe_config);
+ pipe_config->lane_count);
return ret;
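bxt_ddi_phy_calc_lane_lat_optim_mask() also moves out of this file; the compute_config hunk above shows its second parameter becomes the raw lane count. A sketch of the expected post-move helper, with the mapping copied verbatim from the deleted switch:

uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
					     uint8_t lane_count)
{
	switch (lane_count) {
	case 1:
		return 0;
	case 2:
		return BIT(2) | BIT(0);	/* lanes 0 and 2 */
	case 4:
		return BIT(3) | BIT(2) | BIT(0);
	default:
		MISSING_CASE(lane_count);
		return 0;
	}
}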
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d6a8f11..185e3bb 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -282,12 +282,13 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
- else
+ } else if (INTEL_GEN(dev_priv) >= 5) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
+ }
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 895b3dc..92ab01f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,7 +37,6 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -116,8 +115,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
@@ -1008,10 +1008,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
target_clock, refclk, NULL, best_clock);
}
-bool intel_crtc_active(struct drm_crtc *crtc)
+bool intel_crtc_active(struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*
@@ -1025,17 +1023,16 @@ bool intel_crtc_active(struct drm_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return intel_crtc->active && crtc->primary->state->fb &&
- intel_crtc->config->base.adjusted_mode.crtc_clock;
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- return intel_crtc->config->cpu_transcoder;
+ return crtc->config->cpu_transcoder;
}
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
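The repeated pipe_to_crtc_mapping[] plus to_intel_crtc() dance throughout this file is replaced by intel_get_crtc_for_pipe(). Its assumed definition, once the mapping array stores intel_crtc pointers (location and body are an inference, not part of this hunk):

static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	return dev_priv->pipe_to_crtc_mapping[pipe];
}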
@@ -1788,8 +1785,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
i915_reg_t reg;
uint32_t val, pipeconf_val;
@@ -4253,6 +4250,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
@@ -4267,7 +4265,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
continue;
if (crtc->flip_work)
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
return true;
}
@@ -4944,7 +4942,7 @@ void hsw_disable_ips(struct intel_crtc *crtc)
}
/* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
@@ -5056,7 +5054,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
if (HAS_GMCH_DISPLAY(dev_priv)) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
}
@@ -5075,7 +5073,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
crtc->wm.cxsr_allowed = true;
if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
if (old_pri_state) {
struct intel_plane_state *primary_state =
@@ -5133,7 +5131,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_crtc_state->base.active) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
}
@@ -5146,7 +5144,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
*/
if (pipe_config->disable_lp_wm) {
ilk_disable_lp_wm(dev);
- intel_wait_for_vblank(dev, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
/*
@@ -5173,7 +5171,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(pipe_config);
else if (pipe_config->update_wm_pre)
- intel_update_watermarks(&crtc->base);
+ intel_update_watermarks(crtc);
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
@@ -5401,7 +5399,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5493,7 +5491,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(pipe_config);
else
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5511,8 +5509,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder) {
- intel_wait_for_vblank(dev, pipe);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
@@ -5522,8 +5520,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
* to change the workaround. */
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
- intel_wait_for_vblank(dev, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+ intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
@@ -5844,10 +5842,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
static int skl_calc_cdclk(int max_pixclk, int vco);
-static void intel_update_max_cdclk(struct drm_device *dev)
+static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -5905,11 +5901,9 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_dotclk_freq);
}
-static void intel_update_cdclk(struct drm_device *dev)
+static void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
@@ -6070,14 +6064,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
return;
}
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6210,7 +6204,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
dev_priv->skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(&dev_priv->drm);
+ intel_update_max_cdclk(dev_priv);
}
static void
@@ -6296,7 +6290,6 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
- struct drm_device *dev = &dev_priv->drm;
u32 freq_select, pcu_ack;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -6347,7 +6340,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
@@ -6394,7 +6387,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk_pll.vco == 0 ||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -6428,7 +6421,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
@@ -6485,7 +6478,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
mutex_unlock(&dev_priv->sb_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -6493,7 +6486,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
!= dev_priv->cdclk_freq);
switch (cdclk) {
@@ -6526,7 +6519,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -6746,7 +6739,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6799,7 +6792,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6837,7 +6830,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* wait for planes to fully turn off before disabling the pipe.
*/
if (IS_GEN2(dev_priv))
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6915,7 +6908,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
encoder->base.crtc = NULL;
intel_fbc_disable(intel_crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
domains = intel_crtc->enabled_power_domains;
@@ -7075,7 +7068,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
if (pipe_config->fdi_lanes <= 2)
return 0;
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7094,7 +7087,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -7252,10 +7245,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return 0;
}
-static int skylake_get_display_clock_speed(struct drm_device *dev)
+static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl;
+ u32 cdctl;
skl_dpll0_update(dev_priv);
@@ -7314,9 +7306,8 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
dev_priv->cdclk_pll.ref;
}
-static int broxton_get_display_clock_speed(struct drm_device *dev)
+static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 divider;
int div, vco;
@@ -7349,9 +7340,8 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
return DIV_ROUND_CLOSEST(vco, div);
}
-static int broadwell_get_display_clock_speed(struct drm_device *dev)
+static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7369,9 +7359,8 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
return 675000;
}
-static int haswell_get_display_clock_speed(struct drm_device *dev)
+static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -7387,35 +7376,35 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)
return 540000;
}
-static int valleyview_get_display_clock_speed(struct drm_device *dev)
+static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
+ return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL);
}
-static int ilk_get_display_clock_speed(struct drm_device *dev)
+static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 450000;
}
-static int i945_get_display_clock_speed(struct drm_device *dev)
+static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 400000;
}
-static int i915_get_display_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 333333;
}
-static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 200000;
}
-static int pnv_get_display_clock_speed(struct drm_device *dev)
+static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7438,9 +7427,9 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i915gm_get_display_clock_speed(struct drm_device *dev)
+static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -7458,14 +7447,14 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i865_get_display_clock_speed(struct drm_device *dev)
+static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 266667;
}
-static int i85x_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
/*
@@ -7501,14 +7490,13 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
return 0;
}
-static int i830_get_display_clock_speed(struct drm_device *dev)
+static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
return 133333;
}
-static unsigned int intel_hpll_vco(struct drm_device *dev)
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
@@ -7555,16 +7543,16 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
vco_table = ctg_vco;
else if (IS_G4X(dev_priv))
vco_table = elk_vco;
- else if (IS_CRESTLINE(dev))
+ else if (IS_CRESTLINE(dev_priv))
vco_table = cl_vco;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
vco_table = pnv_vco;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
vco_table = blb_vco;
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -7575,10 +7563,10 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
return vco;
}
-static int gm45_get_display_clock_speed(struct drm_device *dev)
+static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7598,14 +7586,14 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
}
}
-static int i965gm_get_display_clock_speed(struct drm_device *dev)
+static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -7636,15 +7624,15 @@ fail:
return 200000;
}
-static int g33_get_display_clock_speed(struct drm_device *dev)
+static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+ unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
uint16_t tmp = 0;
pci_read_config_word(pdev, GCFGC, &tmp);
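All of the ->get_display_clock_speed() implementations above switch from struct drm_device to struct drm_i915_private, matching the updated caller in intel_update_cdclk(). The vtable member in struct drm_i915_display_funcs is assumed to change in step:

-	int (*get_display_clock_speed)(struct drm_device *dev);
+	int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);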
@@ -7733,10 +7721,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 fp, fp2 = 0;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
@@ -8106,11 +8094,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
@@ -8121,7 +8108,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
- if (IS_CHERRYVIEW(to_i915(dev))) {
+ if (IS_CHERRYVIEW(dev_priv)) {
chv_compute_dpll(crtc, pipe_config);
chv_prepare_pll(crtc, pipe_config);
chv_enable_pll(crtc, pipe_config);
@@ -8144,20 +8131,19 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
* Disable the PLL for @pipe. To be used in cases where we need
* the PLL enabled even when @pipe is not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (IS_CHERRYVIEW(to_i915(dev)))
- chv_disable_pll(to_i915(dev), pipe);
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
else
- vlv_disable_pll(to_i915(dev), pipe);
+ vlv_disable_pll(dev_priv, pipe);
}
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
struct dpll *clock = &crtc_state->dpll;
@@ -8183,7 +8169,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -8204,7 +8190,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -8218,7 +8204,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc_state->dpll_hw_state.dpll_md = dpll_md;
@@ -10191,7 +10177,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_update_cdclk(&dev_priv->drm);
+ intel_update_cdclk(dev_priv);
}
/*
@@ -10261,6 +10247,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
bxt_set_cdclk(to_i915(dev), req_cdclk);
}
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+ int pixel_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than
+ * 432 MHz, audio enabled, port width x4, and link rate
+ * HBR2 (5.4 GHz), or else there may be audio corruption or
+ * screen corruption."
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->has_audio &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4)
+ pixel_rate = max(432000, pixel_rate);
+
+ return pixel_rate;
+}
+
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
@@ -10286,9 +10295,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+ if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+ pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+ pixel_rate);
intel_state->min_pixclk[i] = pixel_rate;
}
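Worked numbers for the new bdw_adjust_min_pipe_pixel_rate() clamp, assuming a hypothetical 337500 kHz mode; the helpers are the stock kernel macros:

#include <linux/kernel.h>	/* DIV_ROUND_UP(), max() */

static int example_min_pixel_rate(void)
{
	int pixel_rate = 337500;	/* kHz, hypothetical */

	/* IPS on BDW: cdclk must stay at or above pixel_rate / 0.95 */
	pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); /* -> 355264 kHz */

	/* DP x4, HBR2, audio enabled: never report less than 432 MHz */
	return max(432000, pixel_rate);			 /* -> 432000 kHz */
}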
@@ -10371,7 +10380,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
WARN(cdclk != dev_priv->cdclk_freq,
"cdclk requested %d kHz but got %d kHz\n",
@@ -10736,10 +10745,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
if (INTEL_INFO(dev)->gen >= 9) {
- skl_init_scalers(dev, crtc, pipe_config);
- }
+ skl_init_scalers(dev_priv, crtc, pipe_config);
- if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
@@ -11051,7 +11058,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -11136,6 +11143,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
@@ -11288,7 +11296,7 @@ found:
old->restore_state = restore_state;
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return true;
fail:
@@ -11367,7 +11375,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
fp = pipe_config->dpll_hw_state.fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev)) {
+ if (IS_PINEVIEW(dev_priv)) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -11376,7 +11384,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
if (!IS_GEN2(dev_priv)) {
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -11398,7 +11406,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
return;
}
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
@@ -11667,8 +11675,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11681,12 +11688,12 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
!is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11694,8 +11701,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
unsigned long flags;
@@ -11708,12 +11714,12 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL &&
is_mmio_work(work) &&
- pageflip_finished(intel_crtc, work))
- page_flip_completed(intel_crtc);
+ pageflip_finished(crtc, work))
+ page_flip_completed(crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -11967,8 +11973,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
- struct reservation_object *resv;
-
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11990,12 +11994,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
else if (i915.enable_execlists)
return true;
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv && !reservation_object_test_signaled_rcu(resv, false))
- return true;
-
- return engine != i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -12068,17 +12067,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
- struct reservation_object *resv;
- if (work->flip_queued_req)
- WARN_ON(i915_wait_request(work->flip_queued_req,
- 0, NULL, NO_WAITBOOST));
-
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv)
- WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
@@ -12141,8 +12131,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -12151,19 +12140,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->flip_work;
+ work = crtc->flip_work;
if (work != NULL && !is_mmio_work(work) &&
- __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ __pageflip_stall_check_cs(dev_priv, crtc, work)) {
WARN_ONCE(1,
"Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
- page_flip_completed(intel_crtc);
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
+ page_flip_completed(crtc);
work = NULL;
}
if (work != NULL && !is_mmio_work(work) &&
- intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -12279,8 +12268,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_active_get_engine(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
engine = dev_priv->engine[BCS];
} else {
@@ -12312,9 +12300,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
-
- work->flip_queued_req = i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine, engine->last_context);
@@ -12360,7 +12345,7 @@ cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
@@ -13479,13 +13464,13 @@ static void verify_wm_state(struct drm_crtc *crtc,
return;
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
- sw_wm = &intel_crtc->wm.active.skl;
+ sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
/* planes */
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
hw_plane_wm = &hw_wm.planes[plane];
sw_plane_wm = &sw_wm->planes[plane];
@@ -14154,13 +14139,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+ struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
@@ -14183,28 +14165,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = drm_atomic_helper_prepare_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (!ret && !nonblock) {
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- I915_WAIT_INTERRUPTIBLE,
- NULL, NULL);
- if (ret) {
- /* Any hang should be swallowed by the wait */
- WARN_ON(ret == -EIO);
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
- break;
- }
- }
- }
-
return ret;
}
@@ -14230,22 +14190,24 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
return;
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
if (!((1 << pipe) & crtc_mask))
continue;
- ret = drm_crtc_vblank_get(crtc);
+ ret = drm_crtc_vblank_get(&crtc->base);
if (WARN_ON(ret != 0)) {
crtc_mask &= ~(1 << pipe);
continue;
}
- last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+ last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
}
for_each_pipe(dev_priv, pipe) {
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
long lret;
if (!((1 << pipe) & crtc_mask))
@@ -14253,12 +14215,12 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
lret = wait_event_timeout(dev->vblank[pipe].queue,
last_vblank_count[pipe] !=
- drm_crtc_vblank_count(crtc),
+ drm_crtc_vblank_count(&crtc->base),
msecs_to_jiffies(50));
WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
- drm_crtc_vblank_put(crtc);
+ drm_crtc_vblank_put(&crtc->base);
}
}
@@ -14332,7 +14294,7 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int *crtc_vblank_mask)
{
- struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
@@ -14383,7 +14345,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
crtc_vblank_mask);
if (vbl_wait)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
progress = true;
}
@@ -14398,26 +14360,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
- int i, ret;
-
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane->state);
-
- if (!intel_plane_state->wait_req)
- continue;
-
- ret = i915_wait_request(intel_plane_state->wait_req,
- 0, NULL, NULL);
- /* EIO should be eaten, and we can't get interrupted in the
- * worker, and blocking commits have waited already. */
- WARN_ON(ret);
- }
+ int i;
drm_atomic_helper_wait_for_dependencies(state);
@@ -14462,7 +14408,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_pch_fifo_underruns(dev_priv);
if (!crtc->state->active)
- intel_update_watermarks(crtc);
+ intel_update_watermarks(intel_crtc);
}
}
@@ -14572,12 +14518,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
+ struct drm_atomic_state *state =
+ container_of(work, struct drm_atomic_state, commit_work);
+
intel_atomic_commit_tail(state);
}
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify notify)
+{
+ struct intel_atomic_state *state =
+ container_of(fence, struct intel_atomic_state, commit_ready);
+
+ switch (notify) {
+ case FENCE_COMPLETE:
+ if (state->base.commit_work.func)
+ queue_work(system_unbound_wq, &state->base.commit_work);
+ break;
+
+ case FENCE_FREE:
+ drm_atomic_state_put(&state->base);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
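The contract the new callback relies on, summarized (an interpretation of the hunk above, not wording from the patch):

/*
 * FENCE_COMPLETE - every fence awaited through i915_sw_fence_await_*()
 *                  has signalled, so the commit may start. commit_work.func
 *                  doubles as the nonblocking flag: intel_atomic_commit()
 *                  below leaves it NULL for blocking commits, in which case
 *                  the caller runs intel_atomic_commit_tail() itself.
 * FENCE_FREE     - the last reference on the sw fence is gone; drop the
 *                  drm_atomic_state reference taken before
 *                  i915_sw_fence_init().
 */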
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state;
@@ -14623,11 +14590,14 @@ static int intel_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(state);
+ i915_sw_fence_init(&intel_state->commit_ready,
+ intel_atomic_commit_ready);
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ ret = intel_atomic_prepare_commit(dev, state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&intel_state->commit_ready);
return ret;
}
@@ -14638,10 +14608,14 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_track_fbs(state);
drm_atomic_state_get(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
+ INIT_WORK(&state->commit_work,
+ nonblock ? intel_atomic_commit_work : NULL);
+
+ i915_sw_fence_commit(&intel_state->commit_ready);
+ if (!nonblock) {
+ i915_sw_fence_wait(&intel_state->commit_ready);
intel_atomic_commit_tail(state);
+ }
return 0;
}
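The two resulting commit flows, traced through the names in this hunk (a sketch; error paths omitted):

nonblock:
	intel_atomic_commit()
	  -> i915_sw_fence_commit(&intel_state->commit_ready)
	  ... awaited fences signal ...
	  -> intel_atomic_commit_ready(FENCE_COMPLETE)
	  -> queue_work(system_unbound_wq, &state->commit_work)
	  -> intel_atomic_commit_work() -> intel_atomic_commit_tail()

blocking:
	intel_atomic_commit()
	  -> i915_sw_fence_commit(&intel_state->commit_ready)
	  -> i915_sw_fence_wait(&intel_state->commit_ready)
	  -> intel_atomic_commit_tail()	/* inline; commit_work.func == NULL */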
@@ -14753,20 +14727,22 @@ int
intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_state->state);
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = new_state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
- struct reservation_object *resv;
- int ret = 0;
+ int ret;
if (!obj && !old_obj)
return 0;
if (old_obj) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
+ drm_atomic_get_existing_crtc_state(new_state->state,
+ plane->state->crtc);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14779,52 +14755,56 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state))
- ret = i915_gem_object_wait_rendering(old_obj, true);
- if (ret) {
- /* GPU hangs should have been swallowed by the wait */
- WARN_ON(ret == -EIO);
- return ret;
+ if (needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ old_obj->resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
}
}
+ if (new_state->fence) { /* explicit fencing */
+ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ new_state->fence,
+ I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
if (!obj)
return 0;
- /* For framebuffer backed by dmabuf, wait for fence */
- resv = i915_gem_object_get_dmabuf_resv(obj);
- if (resv) {
- long lret;
-
- lret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- if (lret == -ERESTARTSYS)
- return lret;
-
- WARN(lret < 0, "waiting returns %li\n", lret);
+ if (!new_state->fence) { /* implicit fencing */
+ ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ obj->resv, NULL,
+ false, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
}
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
- if (ret)
+ if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
+ return ret;
+ }
} else {
struct i915_vma *vma;
vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma))
- ret = PTR_ERR(vma);
- }
-
- if (ret == 0) {
- to_intel_plane_state(new_state)->wait_req =
- i915_gem_active_get(&obj->last_write,
- &obj->base.dev->struct_mutex);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+ return PTR_ERR(vma);
+ }
}
- return ret;
+ return 0;
}
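The fencing decision intel_prepare_plane_fb() now makes, condensed (same helpers as the hunk above; obj->resv is the object's reservation, I915_FENCE_TIMEOUT bounds the wait in both cases):

if (new_state->fence)	/* explicit fence supplied by userspace */
	ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
					    new_state->fence,
					    I915_FENCE_TIMEOUT, GFP_KERNEL);
else			/* implicit fencing via the object's reservation */
	ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
					      obj->resv, NULL, false,
					      I915_FENCE_TIMEOUT, GFP_KERNEL);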
/**
@@ -14842,7 +14822,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct intel_plane_state *old_intel_state;
- struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14854,9 +14833,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
-
- i915_gem_request_assign(&intel_state->wait_req, NULL);
- i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
int
@@ -14976,9 +14952,6 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- if (!plane)
- return;
-
drm_plane_cleanup(plane);
kfree(to_intel_plane(plane));
}
@@ -14992,13 +14965,11 @@ const struct drm_plane_funcs intel_plane_funcs = {
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
-
};
-static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
@@ -15007,17 +14978,22 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
+ if (!primary) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&primary->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
primary->base.state = &state->base;
primary->can_scale = false;
primary->max_downscale = 1;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
primary->can_scale = true;
state->scaler_id = -1;
}
@@ -15025,10 +15001,10 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->plane = pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
primary->plane = !pipe;
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -15040,7 +15016,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->update_plane = ironlake_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
- } else if (INTEL_INFO(dev)->gen >= 4) {
+ } else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -15054,21 +15030,21 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
+ 0, &intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY,
"plane %c", plane_name(primary->plane));
@@ -15093,13 +15069,13 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
- return &primary->base;
+ return primary;
fail:
kfree(state);
kfree(primary);
- return NULL;
+ return ERR_PTR(ret);
}
static int
@@ -15195,21 +15171,25 @@ intel_update_cursor_plane(struct drm_plane *plane,
intel_crtc_update_cursor(crtc, state);
}
-static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
- int pipe)
+static struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *cursor = NULL;
struct intel_plane_state *state = NULL;
int ret;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor)
+ if (!cursor) {
+ ret = -ENOMEM;
goto fail;
+ }
state = intel_create_plane_state(&cursor->base);
- if (!state)
+ if (!state) {
+ ret = -ENOMEM;
goto fail;
+ }
+
cursor->base.state = &state->base;
cursor->can_scale = false;
@@ -15221,8 +15201,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
- ret = drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR,
@@ -15236,76 +15216,94 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
DRM_ROTATE_0 |
DRM_ROTATE_180);
- if (INTEL_INFO(dev)->gen >=9)
+ if (INTEL_GEN(dev_priv) >= 9)
state->scaler_id = -1;
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
- return &cursor->base;
+ return cursor;
fail:
kfree(state);
kfree(cursor);
- return NULL;
+ return ERR_PTR(ret);
}
-static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+static void skl_init_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- struct intel_scaler *intel_scaler;
- struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
- for (i = 0; i < intel_crtc->num_scalers; i++) {
- intel_scaler = &scaler_state->scalers[i];
- intel_scaler->in_use = 0;
- intel_scaler->mode = PS_SCALER_MODE_DYN;
+ for (i = 0; i < crtc->num_scalers; i++) {
+ struct intel_scaler *scaler = &scaler_state->scalers[i];
+
+ scaler->in_use = 0;
+ scaler->mode = PS_SCALER_MODE_DYN;
}
scaler_state->scaler_id = -1;
}
-static void intel_crtc_init(struct drm_device *dev, int pipe)
+static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
- struct drm_plane *primary = NULL;
- struct drm_plane *cursor = NULL;
- int ret;
+ struct intel_plane *primary = NULL;
+ struct intel_plane *cursor = NULL;
+ int sprite, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (intel_crtc == NULL)
- return;
+ if (!intel_crtc)
+ return -ENOMEM;
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state)
+ if (!crtc_state) {
+ ret = -ENOMEM;
goto fail;
+ }
intel_crtc->config = crtc_state;
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
/* initialize shared scalers */
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
if (pipe == PIPE_C)
intel_crtc->num_scalers = 1;
else
intel_crtc->num_scalers = SKL_NUM_SCALERS;
- skl_init_scalers(dev, intel_crtc, crtc_state);
+ skl_init_scalers(dev_priv, intel_crtc, crtc_state);
}
- primary = intel_primary_plane_create(dev, pipe);
- if (!primary)
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
goto fail;
+ }
+
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
- cursor = intel_cursor_plane_create(dev, pipe);
- if (!cursor)
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
goto fail;
+ }
- ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ &primary->base, &cursor->base,
+ &intel_crtc_funcs,
"pipe %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -15315,8 +15313,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
* is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
- if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
+ intel_crtc->plane = (enum plane) pipe;
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
@@ -15329,21 +15327,26 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
- dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_color_init(&intel_crtc->base);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
- return;
+
+ return 0;
fail:
- intel_plane_destroy(primary);
- intel_plane_destroy(cursor);
+ /*
+ * drm_mode_config_cleanup() will free up any
+ * crtcs/planes already initialized.
+ */
kfree(crtc_state);
kfree(intel_crtc);
+
+ return ret;
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -15393,11 +15396,9 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
return index_mask;
}
-static bool has_edp_a(struct drm_device *dev)
+static bool has_edp_a(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!IS_MOBILE(dev))
+ if (!IS_MOBILE(dev_priv))
return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
@@ -15537,7 +15538,7 @@ static void intel_setup_outputs(struct drm_device *dev)
int found;
dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
- if (has_edp_a(dev))
+ if (has_edp_a(dev_priv))
intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -15929,7 +15930,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- i915_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -16332,11 +16333,11 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
- intel_init_clock_gating(dev);
+ intel_init_clock_gating(dev_priv);
}
/*
@@ -16420,11 +16421,10 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
-void intel_modeset_init(struct drm_device *dev)
+int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
@@ -16442,10 +16442,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_quirks(dev);
- intel_init_pm(dev);
+ intel_init_pm(dev_priv);
if (INTEL_INFO(dev)->num_pipes == 0)
- return;
+ return 0;
/*
* There may be no VBT; and if the BIOS enabled SSC we can
@@ -16494,22 +16494,22 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
- intel_crtc_init(dev, pipe);
- for_each_sprite(dev_priv, pipe, sprite) {
- ret = intel_plane_init(dev, pipe, sprite);
- if (ret)
- DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
- pipe_name(pipe), sprite_name(pipe, sprite), ret);
+ int ret;
+
+ ret = intel_crtc_init(dev_priv, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
}
}
intel_update_czclk(dev_priv);
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv);
intel_shared_dpll_init(dev);
if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+ intel_update_max_cdclk(dev_priv);
/* Just disable it once at startup */
i915_disable_vga(dev);
@@ -16548,6 +16548,8 @@ void intel_modeset_init(struct drm_device *dev)
* since the watermark calculation done here will use pstate->fb.
*/
sanitize_watermarks(dev);
+
+ return 0;
}
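Editor's note: a minimal sketch (not part of the patch) of how a load-path caller might consume the new int return of intel_modeset_init() and unwind instead of continuing with partially initialized mode setting; the function and label names here are assumptions for illustration.

static int example_load_modeset_init(struct drm_device *dev)
{
	int ret;

	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup;	/* hypothetical unwind label */

	return 0;

cleanup:
	/* undo whatever was set up before intel_modeset_init() */
	return ret;
}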
static void intel_enable_pipe_a(struct drm_device *dev)
@@ -16877,7 +16879,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
encoder->base.crtc = &crtc->base;
crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
@@ -16978,7 +16981,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
}
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3c2293b..9df331b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -213,6 +213,81 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
+static int
+intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+{
+ if (intel_dp->num_sink_rates) {
+ *sink_rates = intel_dp->sink_rates;
+ return intel_dp->num_sink_rates;
+ }
+
+ *sink_rates = default_rates;
+
+ return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+}
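Editor's note: a worked example (not part of the patch) of the link-bw fallback above, assuming default_rates[] is the sorted { 162000, 270000, 540000 } table used elsewhere in this file.

/*
 * DP_LINK_BW_1_62 = 0x06 -> (0x06 >> 3) + 1 = 1 -> { 162000 }
 * DP_LINK_BW_2_7  = 0x0a -> (0x0a >> 3) + 1 = 2 -> { 162000, 270000 }
 * DP_LINK_BW_5_4  = 0x14 -> (0x14 >> 3) + 1 = 3 -> { 162000, 270000, 540000 }
 */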
+
+static int
+intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int size;
+
+ if (IS_BROXTON(dev_priv)) {
+ *source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ *source_rates = skl_rates;
+ size = ARRAY_SIZE(skl_rates);
+ } else {
+ *source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates);
+ }
+
+ /* This depends on the fact that 5.4 is last value in the array */
+ if (!intel_dp_source_supports_hbr2(intel_dp))
+ size--;
+
+ return size;
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
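+ /*
+ * Two-pointer intersection: both arrays are sorted ascending, so we
+ * advance whichever side is smaller and record equal entries until
+ * either array is exhausted.
+ */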
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
+
+static int intel_dp_common_rates(struct intel_dp *intel_dp,
+ int *common_rates)
+{
+ const int *source_rates, *sink_rates;
+ int source_len, sink_len;
+
+ sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
+ source_len = intel_dp_source_rates(intel_dp, &source_rates);
+
+ return intersect_rates(source_rates, source_len,
+ sink_rates, sink_len,
+ common_rates);
+}
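Editor's note: a short usage sketch (not part of the patch) for the helper moved above; DP_MAX_SUPPORTED_RATES bounds the output array, and the return value is the number of rates supported by both source and sink. The function name is an assumption.

static void example_log_common_rates(struct intel_dp *intel_dp)
{
	int common_rates[DP_MAX_SUPPORTED_RATES];
	int i, len;

	len = intel_dp_common_rates(intel_dp, common_rates);
	for (i = 0; i < len; i++)
		DRM_DEBUG_KMS("common rate %d: %d kHz\n", i, common_rates[i]);
}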
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -320,8 +395,7 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -359,7 +433,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = IS_CHERRYVIEW(dev_priv) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev_priv) ?
+ if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
@@ -383,7 +457,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev, pipe);
+ vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
chv_phy_powergate_ch(dev_priv, phy, ch, false);
@@ -1291,19 +1365,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
-static int
-intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
-{
- if (intel_dp->num_sink_rates) {
- *sink_rates = intel_dp->sink_rates;
- return intel_dp->num_sink_rates;
- }
-
- *sink_rates = default_rates;
-
- return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1316,31 +1377,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
return false;
}
-static int
-intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- int size;
-
- if (IS_BROXTON(dev_priv)) {
- *source_rates = bxt_rates;
- size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- *source_rates = skl_rates;
- size = ARRAY_SIZE(skl_rates);
- } else {
- *source_rates = default_rates;
- size = ARRAY_SIZE(default_rates);
- }
-
- /* This depends on the fact that 5.4 is last value in the array */
- if (!intel_dp_source_supports_hbr2(intel_dp))
- size--;
-
- return size;
-}
-
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -1375,43 +1411,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
}
}
-static int intersect_rates(const int *source_rates, int source_len,
- const int *sink_rates, int sink_len,
- int *common_rates)
-{
- int i = 0, j = 0, k = 0;
-
- while (i < source_len && j < sink_len) {
- if (source_rates[i] == sink_rates[j]) {
- if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
- return k;
- common_rates[k] = source_rates[i];
- ++k;
- ++i;
- ++j;
- } else if (source_rates[i] < sink_rates[j]) {
- ++i;
- } else {
- ++j;
- }
- }
- return k;
-}
-
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
- int *common_rates)
-{
- const int *source_rates, *sink_rates;
- int source_len, sink_len;
-
- sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
- source_len = intel_dp_source_rates(intel_dp, &source_rates);
-
- return intersect_rates(source_rates, source_len,
- sink_rates, sink_len,
- common_rates);
-}
-
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
@@ -1451,40 +1450,35 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
-static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+bool
+__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
- uint8_t rev;
- int len;
-
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
+ u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
+ DP_SINK_OUI;
- if (!drm_dp_is_branch(intel_dp->dpcd))
- return;
-
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
- if (len < 0)
- return;
-
- DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+ return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
+ sizeof(*desc);
}
-static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
- uint8_t rev[2];
- int len;
+ struct intel_dp_desc *desc = &intel_dp->desc;
+ bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_OUI_SUPPORT;
+ int dev_id_len;
- if ((drm_debug & DRM_UT_KMS) == 0)
- return;
-
- if (!drm_dp_is_branch(intel_dp->dpcd))
- return;
+ if (!__intel_dp_read_desc(intel_dp, desc))
+ return false;
- len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
- if (len < 0)
- return;
+ dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
+ DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
+ drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
+ (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
+ dev_id_len, desc->device_id,
+ desc->hw_rev >> 4, desc->hw_rev & 0xf,
+ desc->sw_major_rev, desc->sw_minor_rev);
- DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+ return true;
}
static int rate_to_index(int find, const int *rates)
@@ -2369,7 +2363,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
- intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
+ intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -3492,7 +3486,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -3502,7 +3496,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static bool
+bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
@@ -3526,6 +3520,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
+ intel_dp_read_desc(intel_dp);
+
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
@@ -3627,23 +3623,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return true;
}
-static void
-intel_dp_probe_oui(struct intel_dp *intel_dp)
-{
- u8 buf[3];
-
- if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
- return;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
- DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
-}
-
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
@@ -3687,7 +3666,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret = 0;
@@ -3708,7 +3687,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -3731,7 +3710,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
@@ -3759,14 +3738,14 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
}
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
return 0;
}
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int count, ret;
@@ -3777,7 +3756,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
return ret;
do {
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
@@ -4010,7 +3989,7 @@ intel_dp_retrain_link(struct intel_dp *intel_dp)
intel_dp_stop_link_train(intel_dp);
/* Keep underrun reporting disabled until things are stable */
- intel_wait_for_vblank(&dev_priv->drm, crtc->pipe);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc->config->has_pch_encoder)
@@ -4422,10 +4401,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
- intel_dp_probe_oui(intel_dp);
-
- intel_dp_print_hw_revision(intel_dp);
- intel_dp_print_sw_revision(intel_dp);
+ intel_dp_read_desc(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -4489,21 +4465,11 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum drm_connector_status status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (intel_dp->is_mst) {
- /* MST devices are disconnected from a monitor POV */
- intel_dp_unset_edid(intel_dp);
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
- return connector_status_disconnected;
- }
-
/* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done)
status = intel_dp_long_pulse(intel_dp->attached_connector);
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 047f487..7a8e82d 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -23,6 +23,565 @@
#include "intel_drv.h"
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed through IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to a specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. The CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * i.e. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
+ */
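
Editor's note: the pipe-to-common-lane and port-to-spline mapping described above, restated as code (not part of the patch). These sketches mirror the existing vlv_pipe_to_channel()/vlv_dport_to_channel() style helpers in intel_drv.h; treat the bodies as illustrative, named with an example_ prefix to mark them as hypothetical.

static inline enum dpio_channel example_vlv_pipe_to_channel(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
	case PIPE_C:	/* single channel PHY: pipe C uses CH0 */
		return DPIO_CH0;
	default:	/* pipe B */
		return DPIO_CH1;
	}
}

static inline enum dpio_channel example_vlv_dport_to_channel(enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_D:	/* single channel PHY: port D uses CH0 */
		return DPIO_CH0;
	default:	/* port C */
		return DPIO_CH1;
	}
}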
+
+/**
+ * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ */
+struct bxt_ddi_phy_info {
+ /**
+ * @dual_channel: true if this phy has a second channel.
+ */
+ bool dual_channel;
+
+ /**
+ * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+ * Otherwise the GRC value will be copied from the phy indicated by
+ * this field.
+ */
+ enum dpio_phy rcomp_phy;
+
+ /**
+ * @channel: struct containing per channel information.
+ */
+ struct {
+ /**
+ * @port: which port maps to this channel.
+ */
+ enum port port;
+ } channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = true,
+ .rcomp_phy = DPIO_PHY1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ [DPIO_CH1] = { .port = PORT_C },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+};
+
+static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
+{
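+ /*
+ * dual_channel is 0 or 1, so the multiplication keeps the CH1 port
+ * bit only for dual channel PHYs; the CH0 bit is always set.
+ */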
+ return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
+ BIT(phy_info->channel[DPIO_CH0].port);
+}
+
+void bxt_port_to_phy_channel(enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
+ phy_info = &bxt_ddi_phy_info[i];
+
+ if (port == phy_info->channel[DPIO_CH0].port) {
+ *phy = i;
+ *ch = DPIO_CH0;
+ return;
+ }
+
+ if (phy_info->dual_channel &&
+ port == phy_info->channel[DPIO_CH1].port) {
+ *phy = i;
+ *ch = DPIO_CH1;
+ return;
+ }
+ }
+
+ WARN(1, "PHY not found for PORT %c", port_name(port));
+ *phy = DPIO_PHY0;
+ *ch = DPIO_CH0;
+}
+
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis)
+{
+ u32 val;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ /*
+ * While we write to the group register to program all lanes at once we
+ * can read only lane registers and we pick lanes 0/1 for that.
+ */
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+ val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val &= ~SCALE_DCOMP_METHOD;
+ if (enable)
+ val |= SCALE_DCOMP_METHOD;
+
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+
+ I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val &= ~DE_EMPHASIS;
+ val |= deemphasis << DEEMPH_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum port port;
+
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+ return false;
+
+ if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+ phy);
+
+ return false;
+ }
+
+ if (phy_info->rcomp_phy == -1 &&
+ !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
+ phy);
+
+ return false;
+ }
+
+ if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+ phy);
+
+ return false;
+ }
+
+ for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
+ u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+ if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+ "for port %c powered down "
+ "(PHY_CTL %08x)\n",
+ phy, port_name(port), tmp);
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ if (intel_wait_for_register(dev_priv,
+ BXT_PORT_REF_DW3(phy),
+ GRC_DONE, GRC_DONE,
+ 10))
+ DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ u32 val;
+
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy_info->rcomp_phy != -1)
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
+
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ }
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val |= GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+ * HW team confirmed that the time to reach phypowergood status is
+ * anywhere between 50 us and 100 us.
+ */
+ if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+ DRM_ERROR("timeout during PHY%d power on\n", phy);
+ }
+
+ /* Program PLL Rcomp code offset */
+ val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val &= ~IREF0RC_OFFSET_MASK;
+ val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+ val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val &= ~IREF1RC_OFFSET_MASK;
+ val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+ /* Program power gating */
+ val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+ SUS_CLK_CONFIG;
+ I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+ if (phy_info->dual_channel) {
+ val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+ I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ }
+
+ if (phy_info->rcomp_phy != -1) {
+ uint32_t grc_code;
+ /*
+ * PHY0 isn't connected to an RCOMP resistor so copy over
+ * the corresponding calibrated value from PHY1, and disable
+ * the automatic calibration on PHY0.
+ */
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+ phy_info->rcomp_phy);
+ grc_code = val << GRC_CODE_FAST_SHIFT |
+ val << GRC_CODE_SLOW_SHIFT |
+ val;
+ I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+
+ val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val |= GRC_DIS | GRC_RDY_OVRD;
+ I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ }
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val |= COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ if (phy_info->rcomp_phy == -1)
+ bxt_phy_wait_grc_done(dev_priv, phy);
+}
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ uint32_t val;
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val &= ~COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val &= ~GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+ bool was_enabled;
+
+ lockdep_assert_held(&dev_priv->power_domains.lock);
+
+ if (rcomp_phy != -1) {
+ was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+ }
+
+ _bxt_ddi_phy_init(dev_priv, phy);
+
+ if (rcomp_phy != -1 && !was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+}
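Editor's note: a sketch (not part of the patch) of a caller honouring the lockdep assertion above. Because bxt_ddi_phy_init() may transiently power up the rcomp PHY to copy its GRC calibration value, callers serialize on power_domains.lock; the wrapper name is an assumption.

static void example_enable_phy_locked(struct drm_i915_private *dev_priv,
				      enum dpio_phy phy)
{
	mutex_lock(&dev_priv->power_domains.lock);
	bxt_ddi_phy_init(dev_priv, phy);
	mutex_unlock(&dev_priv->power_domains.lock);
}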
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ u32 val;
+
+ val = I915_READ(reg);
+ if ((val & mask) == expected)
+ return true;
+
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
+
+ DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
+
+ va_end(args);
+
+ return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ uint32_t mask;
+ bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
+
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ return false;
+
+ ok = true;
+
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+ if (phy_info->dual_channel)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+ if (phy_info->rcomp_phy != -1) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
+
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", phy);
+
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", phy);
+ }
+
+ return ok;
+#undef _CHK
+}
+
+uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_count)
+{
+ switch (lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(lane_count);
+
+ return 0;
+ }
+}
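Editor's note: the calc/set pair is meant to be used together, e.g. from an encoder enable path. A sketch follows (not part of the patch), with the wrapper name assumed.

static void example_apply_lane_lat_optim(struct intel_encoder *encoder,
					 uint8_t lane_count)
{
	uint8_t mask;

	/* pick the lanes that need LATENCY_OPTIM for this lane count */
	mask = bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, lane_count);
	bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}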
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ uint8_t lane_lat_optim_mask)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ }
+}
+
+uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+ uint8_t mask;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
+}
+
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 605d0b5..21853a1 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1371,6 +1371,10 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
{
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1378,72 +1382,72 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->config.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->config.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->config.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->config.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->config.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->config.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->config.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->config.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+ I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->config.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1459,11 +1463,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->config.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1485,6 +1489,10 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
bool ret;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@@ -1495,36 +1503,36 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1533,11 +1541,11 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4e90b07..398195b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -365,6 +365,8 @@ struct intel_atomic_state {
/* Gen9+ only */
struct skl_wm_values wm_results;
+
+ struct i915_sw_fence commit_ready;
};
struct intel_plane_state {
@@ -401,9 +403,6 @@ struct intel_plane_state {
int scaler_id;
struct drm_intel_sprite_colorkey ckey;
-
- /* async flip related structures */
- struct drm_i915_gem_request *wait_req;
};
struct intel_initial_plane_config {
@@ -501,14 +500,6 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
-
- /* cached plane data rate */
- unsigned plane_data_rate[I915_MAX_PLANES];
- unsigned plane_y_data_rate[I915_MAX_PLANES];
-
- /* minimum block allocation */
- uint16_t minimum_blocks[I915_MAX_PLANES];
- uint16_t minimum_y_blocks[I915_MAX_PLANES];
} skl;
};
@@ -731,7 +722,6 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
} active;
/* allow CxSR on this pipe */
@@ -883,6 +873,14 @@ enum link_m_n_set {
M2_N2
};
+struct intel_dp_desc {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@@ -905,6 +903,8 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ /* sink or branch descriptor */
+ struct intel_dp_desc desc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -964,7 +964,7 @@ struct intel_dp {
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
- struct drm_dp_aux *aux;
+ bool desc_valid;
};
struct intel_digital_port {
@@ -1028,17 +1028,15 @@ vlv_pipe_to_channel(enum pipe pipe)
}
}
-static inline struct drm_crtc *
-intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+static inline struct intel_crtc *
+intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
-static inline struct drm_crtc *
-intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+static inline struct intel_crtc *
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
@@ -1098,15 +1096,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
-/*
- * Returns the number of planes for this pipe, ie the number of sprites + 1
- * (primary plane). This doesn't count the cursor plane then.
- */
-static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
-{
- return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
-}
-
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@@ -1123,6 +1112,9 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1145,6 +1137,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -1247,18 +1242,17 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
static inline void
-intel_wait_for_vblank(struct drm_device *dev, int pipe)
+intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(dev, pipe);
+ drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
{
- const struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
+ const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->active)
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
@@ -1305,9 +1299,9 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
-int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
@@ -1335,12 +1329,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
@@ -1358,7 +1346,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-bool intel_crtc_active(struct drm_crtc *crtc);
+bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
@@ -1451,6 +1439,11 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool __intel_dp_read_desc(struct intel_dp *intel_dp,
+ struct intel_dp_desc *desc);
+bool intel_dp_read_desc(struct intel_dp *intel_dp);
+
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1655,23 +1648,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
}
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
- int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
- assert_rpm_wakelock_held(dev_priv);
-
- return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
- WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
- "HW access outside of RPM atomic section\n");
-}
-
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @dev_priv: i915 device instance
@@ -1727,11 +1703,11 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pm.c */
-void intel_init_clock_gating(struct drm_device *dev);
-void intel_suspend_hw(struct drm_device *dev);
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_update_watermarks(struct drm_crtc *crtc);
-void intel_init_pm(struct drm_device *dev);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -1790,7 +1766,8 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
-int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index cd57490..7086454 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -393,12 +393,12 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc) {
- mode = intel_crtc_mode_get(dev, crtc);
+ mode = intel_crtc_mode_get(dev, &crtc->base);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 8cceb34..841f8d1 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -174,7 +174,7 @@ cleanup:
return ret;
}
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -204,13 +204,13 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
kunmap(page);
}
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- engine->last_submitted_seqno = seqno;
+
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+ engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
@@ -220,15 +220,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
intel_engine_wakeup(engine);
}
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
-static void intel_engine_init_requests(struct intel_engine_cs *engine)
+static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
- init_request_active(&engine->last_request, NULL);
- INIT_LIST_HEAD(&engine->request_list);
+ engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
/**
@@ -245,9 +239,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&engine->execlist_queue);
spin_lock_init(&engine->execlist_lock);
- engine->fence_context = dma_fence_context_alloc(1);
-
- intel_engine_init_requests(engine);
+ intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
i915_gem_batch_pool_init(engine, &engine->batch_pool);
@@ -264,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
if (!obj)
- obj = i915_gem_object_create(&engine->i915->drm, size);
+ obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
return PTR_ERR(obj);
@@ -314,6 +306,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
+ ret = i915_gem_render_state_init(engine);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -328,6 +324,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
+ i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index cbe2ebd..e230d48 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1306,7 +1306,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
return;
for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(&crtc->base) &&
+ if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->base.primary->state)->base.visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 3018f4f..e660d8b 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -57,7 +57,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@@ -75,7 +75,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@@ -245,14 +245,13 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
assert_spin_locked(&dev_priv->irq_lock);
- old = !intel_crtc->cpu_fifo_underrun_disabled;
- intel_crtc->cpu_fifo_underrun_disabled = !enable;
+ old = !crtc->cpu_fifo_underrun_disabled;
+ crtc->cpu_fifo_underrun_disabled = !enable;
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
@@ -314,8 +313,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
unsigned long flags;
bool old;
@@ -330,8 +329,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- old = !intel_crtc->pch_fifo_underrun_disabled;
- intel_crtc->pch_fifo_underrun_disabled = !enable;
+ old = !crtc->pch_fifo_underrun_disabled;
+ crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(&dev_priv->drm,
@@ -358,7 +357,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -366,7 +365,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv) &&
- to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+ crtc->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 5cdf7aa..0053258 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -64,7 +64,7 @@ struct drm_i915_gem_request;
*/
struct i915_guc_client {
struct i915_vma *vma;
- void *client_base; /* first page (only) of above */
+ void *vaddr;
struct i915_gem_context *owner;
struct intel_guc *guc;
@@ -123,10 +123,28 @@ struct intel_guc_fw {
uint32_t ucode_offset;
};
+struct intel_guc_log {
+ uint32_t flags;
+ struct i915_vma *vma;
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+
+ /* logging-related stats */
+ u32 capture_miss_count;
+ u32 flush_interrupt_count;
+ u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
struct intel_guc {
struct intel_guc_fw guc_fw;
- uint32_t log_flags;
- struct i915_vma *log_vma;
+ struct intel_guc_log log;
+
+ /* GuC2Host interrupt-related state */
+ bool interrupts_enabled;
struct i915_vma *ads_vma;
struct i915_vma *ctx_pool_vma;
@@ -146,6 +164,9 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
+
+ /* To serialize the Host2GuC actions */
+ struct mutex action_lock;
};
/* intel_guc_loader.c */
@@ -163,5 +184,10 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
+void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
+void i915_guc_register(struct drm_i915_private *dev_priv);
+void i915_guc_unregister(struct drm_i915_private *dev_priv);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index e40db2d..324ea90 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -104,9 +104,9 @@
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_DPC_PAGES 3
+#define GUC_LOG_DPC_PAGES 7
#define GUC_LOG_DPC_SHIFT 6
-#define GUC_LOG_ISR_PAGES 3
+#define GUC_LOG_ISR_PAGES 7
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
@@ -419,15 +419,87 @@ struct guc_ads {
u32 reserved2[4];
} __packed;
+/* GuC logging structures */
+
+enum guc_log_buffer_type {
+ GUC_ISR_LOG_BUFFER,
+ GUC_DPC_LOG_BUFFER,
+ GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_MAX_LOG_BUFFER
+};
+
+/**
+ * DOC: GuC Log buffer Layout
+ *
+ * Page0 +-------------------------------+
+ * | ISR state header (32 bytes) |
+ * | DPC state header |
+ * | Crash dump state header |
+ * Page1 +-------------------------------+
+ * | ISR logs |
+ * Page9 +-------------------------------+
+ * | DPC logs |
+ * Page17 +-------------------------------+
+ * | Crash Dump logs |
+ * +-------------------------------+
+ *
+ * The state structure below is used to coordinate retrieval of the GuC
+ * firmware logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location where i915 last read from the log buffer
+ * and is read-only for the GuC firmware. write_ptr is advanced by the GuC
+ * by the number of bytes written for each log entry and is read-only for
+ * i915. When any log buffer becomes half full, the GuC sends a flush
+ * interrupt. The firmware expects that, while it writes to the second half
+ * of the buffer, the first half is consumed by the Host, followed by a
+ * flush-completed acknowledgment from the Host, so that it never overwrites
+ * unread data and loses logs. So when a buffer becomes half full and i915
+ * has requested the interrupt, the GuC sets the flush_to_file field, copies
+ * write_ptr into sampled_write_ptr and raises the interrupt.
+ * On receiving the interrupt, i915 should read the buffer, clear the
+ * flush_to_file field and update read_ptr with the value of
+ * sampled_write_ptr, before acknowledging the GuC. The marker and version
+ * fields are for internal use by the GuC and opaque to i915. buffer_full_cnt
+ * is incremented every time the GuC detects a log buffer overflow.
+ */
+struct guc_log_buffer_state {
+ u32 marker[2];
+ u32 read_ptr;
+ u32 write_ptr;
+ u32 size;
+ u32 sampled_write_ptr;
+ union {
+ struct {
+ u32 flush_to_file:1;
+ u32 buffer_full_cnt:4;
+ u32 reserved:27;
+ };
+ u32 flags;
+ };
+ u32 version;
+} __packed;
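
To make the handshake above concrete, a minimal host-side consumer could
look like the sketch below. guc_read_log_chunk() is a hypothetical helper;
in this series the actual consumption path is i915_guc_capture_logs().

	/*
	 * Sketch only: handle one buffer type's flush request per the
	 * protocol described in the DOC comment above.
	 */
	static void guc_log_consume(struct guc_log_buffer_state *state)
	{
		u32 write = state->sampled_write_ptr;

		if (!state->flush_to_file)
			return;

		/* Copy out everything written since we last read. */
		guc_read_log_chunk(state, state->read_ptr, write);

		/* Tell the firmware it may reuse the consumed half. */
		state->read_ptr = write;
		state->flush_to_file = 0;

		/* Then send HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE. */
	}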
+
+union guc_log_control {
+ struct {
+ u32 logging_enabled:1;
+ u32 reserved1:3;
+ u32 verbosity:4;
+ u32 reserved2:24;
+ };
+ u32 value;
+} __packed;
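
For illustration, the control word consumed by the new i915_guc_log_control()
entry point (declared in intel_guc.h above) can be composed through this
union. Treating the u64 as carrying these bits in its low dword, and the
chosen verbosity value, are assumptions here.

	/* Hypothetical caller: enable GuC logging at verbosity 2. */
	static int guc_log_enable_example(struct drm_i915_private *dev_priv)
	{
		union guc_log_control ctl = {
			.logging_enabled = 1,
			.verbosity = 2,
		};

		return i915_guc_log_control(dev_priv, ctl.value);
	}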
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+ HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
HOST2GUC_ACTION_LIMIT
};
@@ -449,4 +521,10 @@ enum guc2host_status {
GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
};
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum guc2host_message {
+ GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
+ GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+};
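
The comment above ties these bits to SOFT_SCRATCH_15 (0xC1BC), so a
plausible dispatch in the GuC-to-host interrupt path would test them and
kick the log-flush worker added to struct intel_guc_log. This is a sketch
only; the actual handler lives in i915_irq.c and may differ.

	static void guc_irq_handler_sketch(struct drm_i915_private *dev_priv)
	{
		u32 msg = I915_READ(SOFT_SCRATCH(15));

		if (msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
			   GUC2HOST_MSG_FLUSH_LOG_BUFFER))
			queue_work(dev_priv->guc.log.flush_wq,
				   &dev_priv->guc.log.flush_work);
	}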
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 3c8eaae..1aa8523 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -211,11 +211,13 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
+ params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
if (i915.guc_log_level >= 0) {
- params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- }
+ } else
+ params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
@@ -483,6 +485,7 @@ int intel_guc_setup(struct drm_device *dev)
}
guc_interrupts_release(dev_priv);
+ gen9_reset_guc_interrupts(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
@@ -527,6 +530,9 @@ int intel_guc_setup(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
new file mode 100644
index 0000000..53df5b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static bool
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
+{
+ if (INTEL_GEN(engine->i915) >= 8) {
+ return (ipehr >> 23) == 0x1c;
+ } else {
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
+ }
+}
+
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+ u64 offset)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ enum intel_engine_id id;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
+ return signaller;
+ }
+ } else {
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
+
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+ return signaller;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+ engine->name, ipehr, offset);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ void __iomem *vaddr;
+ u32 cmd, ipehr, head;
+ u64 offset = 0;
+ int i, backwards;
+
+ /*
+ * This function does not support execlist mode - any attempt to
+ * proceed further into this function will result in a kernel panic
+ * when dereferencing ring->buffer, which is not set up in execlist
+ * mode.
+ *
+ * The correct way of doing it would be to derive the currently
+ * executing ring buffer from the current context, which is derived
+ * from the currently running request. Unfortunately, to get the
+ * current request we would have to grab the struct_mutex before doing
+ * anything else, which would be ill-advised since some other thread
+ * might have grabbed it already and managed to hang itself, causing
+ * the hang checker to deadlock.
+ *
+ * Therefore, this function does not support execlist mode in its
+ * current form. Just return NULL and move on.
+ */
+ if (engine->buffer == NULL)
+ return NULL;
+
+ ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
+ return NULL;
+
+ /*
+ * HEAD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX. But limit it to just 4
+ * or 5 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
+ * point at a batch, and semaphores are always emitted into the
+ * ringbuffer itself.
+ */
+ head = I915_READ_HEAD(engine) & HEAD_ADDR;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+ vaddr = (void __iomem *)engine->buffer->vaddr;
+
+ for (i = backwards; i; --i) {
+ /*
+ * Be paranoid and presume the hw has gone off into the wild -
+ * our ring is smaller than what the hardware (and hence
+ * HEAD_ADDR) allows. Also handles wrap-around.
+ */
+ head &= engine->buffer->size - 1;
+
+ /* This here seems to blow up */
+ cmd = ioread32(vaddr + head);
+ if (cmd == ipehr)
+ break;
+
+ head -= 4;
+ }
+
+ if (!i)
+ return NULL;
+
+ *seqno = ioread32(vaddr + head + 4) + 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ offset = ioread32(vaddr + head + 12);
+ offset <<= 32;
+ offset |= ioread32(vaddr + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
+}
+
+static int semaphore_passed(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_cs *signaller;
+ u32 seqno;
+
+ engine->hangcheck.deadlock++;
+
+ signaller = semaphore_waits_for(engine, &seqno);
+ if (signaller == NULL)
+ return -1;
+
+ if (IS_ERR(signaller))
+ return 0;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
+ return -1;
+
+ if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+ return 1;
+
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ engine->hangcheck.deadlock = 0;
+}
+
+static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
+{
+ u32 tmp = current_instdone | *old_instdone;
+ bool unchanged;
+
+ unchanged = tmp == *old_instdone;
+ *old_instdone |= tmp;
+
+ return unchanged;
+}
+
+static bool subunits_stuck(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_instdone instdone;
+ struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
+ bool stuck;
+ int slice;
+ int subslice;
+
+ if (engine->id != RCS)
+ return true;
+
+ intel_engine_get_instdone(engine, &instdone);
+
+ /* There might be unstable subunit states even when
+ * the actual head is not moving. Filter out the unstable ones by
+ * accumulating the undone -> done transitions and only
+ * considering those as progress.
+ */
+ stuck = instdone_unchanged(instdone.instdone,
+ &accu_instdone->instdone);
+ stuck &= instdone_unchanged(instdone.slice_common,
+ &accu_instdone->slice_common);
+
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
+ &accu_instdone->sampler[slice][subslice]);
+ stuck &= instdone_unchanged(instdone.row[slice][subslice],
+ &accu_instdone->row[slice][subslice]);
+ }
+
+ return stuck;
+}
+
+static enum intel_engine_hangcheck_action
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ if (acthd != engine->hangcheck.acthd) {
+
+ /* Clear subunit states on head movement */
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+
+ return HANGCHECK_ACTIVE;
+ }
+
+ if (!subunits_stuck(engine))
+ return HANGCHECK_ACTIVE;
+
+ return HANGCHECK_HUNG;
+}
+
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ enum intel_engine_hangcheck_action ha;
+ u32 tmp;
+
+ ha = head_stuck(engine, acthd);
+ if (ha != HANGCHECK_HUNG)
+ return ha;
+
+ if (IS_GEN2(dev_priv))
+ return HANGCHECK_HUNG;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ tmp = I915_READ_CTL(engine);
+ if (tmp & RING_WAIT) {
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck wait on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ switch (semaphore_passed(engine)) {
+ default:
+ return HANGCHECK_HUNG;
+ case 1:
+ i915_handle_error(dev_priv, 0,
+ "Kicking stuck semaphore on %s",
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
+ return HANGCHECK_KICK;
+ case 0:
+ return HANGCHECK_WAIT;
+ }
+ }
+
+ return HANGCHECK_HUNG;
+}
+
+/*
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We keep track of per-ring seqno progress and,
+ * if there is no progress, the hangcheck score for that ring is increased.
+ * Further, acthd is inspected to see if the ring is stuck. If it is, we
+ * kick the ring. If we see no progress on three subsequent calls,
+ * we assume the chip is wedged and try to fix it by resetting it.
+ */
+static void i915_hangcheck_elapsed(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ gpu_error.hangcheck_work.work);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int hung = 0, stuck = 0;
+ int busy_count = 0;
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define ACTIVE_DECAY 15
+
+ if (!i915.enable_hangcheck)
+ return;
+
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
+
+ /* As enabling the GPU requires fairly extensive mmio access,
+ * periodically arm the mmio checker to see if we are triggering
+ * any invalid access.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+ for_each_engine(engine, dev_priv, id) {
+ bool busy = intel_engine_has_waiter(engine);
+ u64 acthd;
+ u32 seqno;
+ u32 submit;
+
+ semaphore_clear_deadlocks(dev_priv);
+
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, but be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ acthd = intel_engine_get_active_head(engine);
+ seqno = intel_engine_get_seqno(engine);
+ submit = intel_engine_last_submit(engine);
+
+ if (engine->hangcheck.seqno == seqno) {
+ if (i915_seqno_passed(seqno, submit)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
+ } else {
+ /* We always increment the hangcheck score
+ * if the engine is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
+ * batches). The only time we do not increment
+ * the hangcheck score on this ring is when this
+ * engine is in a legitimate wait for another
+ * engine. In that case the waiting engine is a
+ * victim and we want to be sure we catch the
+ * right culprit. Then every time we do kick
+ * the ring, add a small increment to the
+ * score so that we can catch a batch that is
+ * being repeatedly kicked and so responsible
+ * for stalling the machine.
+ */
+ engine->hangcheck.action =
+ engine_stuck(engine, acthd);
+
+ switch (engine->hangcheck.action) {
+ case HANGCHECK_IDLE:
+ case HANGCHECK_WAIT:
+ break;
+ case HANGCHECK_ACTIVE:
+ engine->hangcheck.score += BUSY;
+ break;
+ case HANGCHECK_KICK:
+ engine->hangcheck.score += KICK;
+ break;
+ case HANGCHECK_HUNG:
+ engine->hangcheck.score += HUNG;
+ break;
+ }
+ }
+
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ hung |= intel_engine_flag(engine);
+ if (engine->hangcheck.action != HANGCHECK_HUNG)
+ stuck |= intel_engine_flag(engine);
+ }
+ } else {
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
+
+ /* Gradually reduce the count so that we catch DoS
+ * attempts across multiple batches.
+ */
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
+
+ /* Clear head and subunit states on seqno movement */
+ acthd = 0;
+
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+ }
+
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
+ busy_count += busy;
+ }
+
+ if (hung) {
+ char msg[80];
+ unsigned int tmp;
+ int len;
+
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, dev_priv, hung, tmp)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(dev_priv, hung, msg);
+ }
+
+ /* Reset timer in case GPU hangs without another request being added */
+ if (busy_count)
+ i915_queue_hangcheck(dev_priv);
+}
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+void intel_hangcheck_init(struct drm_i915_private *i915)
+{
+ INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
+ i915_hangcheck_elapsed);
+}
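
For reference, i915_queue_hangcheck() (called at the end of
i915_hangcheck_elapsed() above) is expected to simply re-arm this delayed
work; a sketch under that assumption:

	static void queue_hangcheck_sketch(struct drm_i915_private *i915)
	{
		/* Re-check roughly every DRM_I915_HANGCHECK_PERIOD msecs. */
		queue_delayed_work(system_long_wq,
				   &i915->gpu_error.hangcheck_work,
				   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
	}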
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index af8715f..35ada4e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1164,7 +1164,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bc86585..dde04b764 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -365,7 +365,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
+ reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -440,7 +440,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last)
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_request()
+ * as we resubmit the request. See gen8_emit_breadcrumb()
* for where we prepare the padding after the end of the
* request.
*/
@@ -522,6 +522,28 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
return !engine->execlist_port[0].request;
}
+/**
+ * intel_execlists_idle() - Determine if all engine submission ports are idle
+ * @dev_priv: i915 device private
+ *
+ * Return true if there are no requests pending on any of the submission ports
+ * of any engine.
+ */
+bool intel_execlists_idle(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!i915.enable_execlists)
+ return true;
+
+ for_each_engine(engine, dev_priv, id)
+ if (!execlists_elsp_idle(engine))
+ return false;
+
+ return true;
+}
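
A natural use of this helper is as an idleness assertion before a power
transition; the exact call site is an assumption:

	/* Hypothetical suspend-path check: the ports should have drained. */
	WARN_ON(!intel_execlists_idle(dev_priv));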
+
static bool execlists_elsp_ready(struct intel_engine_cs *engine)
{
int port;
@@ -599,6 +621,15 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
spin_lock_irqsave(&engine->execlist_lock, flags);
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ request->previous_context = engine->last_context;
+ engine->last_context = request->ctx;
+
list_add_tail(&request->execlist_link, &engine->execlist_queue);
if (execlists_elsp_idle(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
@@ -671,46 +702,6 @@ err_unpin:
return ret;
}
-/*
- * intel_logical_ring_advance() - advance the tail and prepare for submission
- * @request: Request to advance the logical ringbuffer of.
- *
- * The tail is updated in our logical ringbuffer struct, not in the actual context. What
- * really happens during submission is that the context and current tail will be placed
- * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
- * point, the tail *inside* the context is updated and the ELSP written to.
- */
-static int
-intel_logical_ring_advance(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- struct intel_engine_cs *engine = request->engine;
-
- intel_ring_advance(ring);
- request->tail = ring->tail;
-
- /*
- * Here we add two extra NOOPs as padding to avoid
- * lite restore of a context with HEAD==TAIL.
- *
- * Caller must reserve WA_TAIL_DWORDS for us!
- */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- request->wa_tail = ring->tail;
-
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- request->previous_context = engine->last_context;
- engine->last_context = request->ctx;
- return 0;
-}
-
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -744,7 +735,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- ce->state->obj->dirty = true;
+ ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission) {
@@ -1566,39 +1557,35 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-
-static int gen8_emit_request(struct drm_i915_gem_request *request)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
{
- struct intel_ring *ring = request->ring;
- int ret;
-
- ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+ *out++ = MI_NOOP;
+ *out++ = MI_NOOP;
+ request->wa_tail = intel_ring_offset(request->ring, out);
+}
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- intel_hws_seqno_address(request->engine) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, request->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+ *out++ = 0;
+ *out++ = request->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
+
+ gen8_emit_wa_tail(request, out);
}
-static int gen8_emit_request_render(struct drm_i915_gem_request *request)
-{
- struct intel_ring *ring = request->ring;
- int ret;
-
- ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
- if (ret)
- return ret;
+static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
+static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
+ u32 *out)
+{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1606,21 +1593,24 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(request->engine);
+ *out++ = 0;
+ *out++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- return intel_logical_ring_advance(request);
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
+ request->tail = intel_ring_offset(request->ring, out);
+
+ gen8_emit_wa_tail(request, out);
}
+static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
@@ -1637,7 +1627,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_init(req);
+ return i915_gem_render_state_emit(req);
}
/**
@@ -1694,7 +1684,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
engine->emit_flush = gen8_emit_flush;
- engine->emit_request = gen8_emit_request;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
engine->submit_request = execlists_submit_request;
engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -1816,7 +1807,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_request = gen8_emit_request_render;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
ret = intel_engine_create_scratch(engine, 4096);
if (ret)
@@ -2042,7 +2034,7 @@ populate_lr_context(struct i915_gem_context *ctx,
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->dirty = true;
+ ctx_obj->mm.dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
@@ -2180,7 +2172,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
reg[CTX_RING_HEAD+1] = 0;
reg[CTX_RING_TAIL+1] = 0;
- ce->state->obj->dirty = true;
+ ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4fed816..c1f5461 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,5 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 632149c..daa5234 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,10 +27,18 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include "intel_drv.h"
+static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+
+ return &dig_port->dp;
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
- struct i2c_adapter *adapter = &lspcon->aux->ddc;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
if (drm_lspcon_get_mode(adapter, &current_mode))
DRM_ERROR("Error reading LSPCON mode\n");
@@ -45,7 +53,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
{
int err;
enum drm_lspcon_mode current_mode;
- struct i2c_adapter *adapter = &lspcon->aux->ddc;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
err = drm_lspcon_get_mode(adapter, &current_mode);
if (err) {
@@ -72,7 +80,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
enum drm_dp_dual_mode_type adaptor_type;
- struct i2c_adapter *adapter = &lspcon->aux->ddc;
+ struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
/* Lets probe the adaptor and check its type */
adaptor_type = drm_dp_dual_mode_detect(adapter);
@@ -89,8 +97,43 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
return true;
}
+static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ unsigned long start = jiffies;
+
+ if (!lspcon->desc_valid)
+ return;
+
+ while (1) {
+ struct intel_dp_desc desc;
+
+ /*
+ * The w/a only applies in PCON mode and we don't expect any
+ * AUX errors.
+ */
+ if (!__intel_dp_read_desc(intel_dp, &desc))
+ return;
+
+ if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
+ jiffies_to_msecs(jiffies - start));
+ return;
+ }
+
+ if (time_after(jiffies, start + msecs_to_jiffies(1000)))
+ break;
+
+ usleep_range(10000, 15000);
+ }
+
+ DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
+}
+
void lspcon_resume(struct intel_lspcon *lspcon)
{
+ lspcon_resume_in_pcon_wa(lspcon);
+
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
DRM_ERROR("LSPCON resume failed\n");
else
@@ -111,7 +154,6 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
lspcon->active = false;
lspcon->mode = DRM_LSPCON_MODE_INVALID;
- lspcon->aux = &dp->aux;
if (!lspcon_probe(lspcon)) {
DRM_ERROR("Failed to probe lspcon\n");
@@ -131,6 +173,13 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
}
}
+ if (!intel_dp_read_dpcd(dp)) {
+ DRM_ERROR("LSPCON DPCD read failed\n");
+ return false;
+ }
+
+ lspcon->desc_valid = intel_dp_read_desc(dp);
+
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 199b90c..de7b3e6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -985,7 +985,7 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
int pipe;
@@ -1163,10 +1163,10 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, crtc);
+ fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25bcd4a1..fd0e4da 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1222,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
- i915_gem_object_put_unlocked(new_bo);
+ i915_gem_object_put(new_bo);
out_free:
kfree(params);
@@ -1466,7 +1466,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
* hardware should be off already */
WARN_ON(dev_priv->overlay->active);
- i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
+ i915_gem_object_put(dev_priv->overlay->reg_bo);
kfree(dev_priv->overlay);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 560fc7a..cc9e0c0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6
@@ -55,10 +56,8 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-static void gen9_init_clock_gating(struct drm_device *dev)
+static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
@@ -81,11 +80,9 @@ static void gen9_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DISABLE_DUMMY0);
}
-static void bxt_init_clock_gating(struct drm_device *dev)
+static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@ -107,9 +104,8 @@ static void bxt_init_clock_gating(struct drm_device *dev)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -146,9 +142,8 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -319,7 +314,6 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -329,7 +323,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
} else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev)) {
+ } else if (IS_PINEVIEW(dev_priv)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
@@ -377,10 +371,9 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct drm_device *dev,
+static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int sprite0_start, sprite1_start, size;
switch (pipe) {
@@ -429,9 +422,8 @@ static int vlv_get_fifo_size(struct drm_device *dev,
return size;
}
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -445,9 +437,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -462,9 +453,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -624,11 +614,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- for_each_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -639,11 +629,10 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
return enabled;
}
-static void pineview_update_wm(struct drm_crtc *unused_crtc)
+static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
@@ -658,10 +647,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
return;
}
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -708,7 +700,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
}
}
-static bool g4x_compute_wm0(struct drm_device *dev,
+static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
@@ -717,24 +709,26 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int *plane_wm,
int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;
- crtc = intel_get_crtc_for_plane(dev, plane);
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -749,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * crtc->cursor->state->crtc_w * cpp;
+ entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -768,7 +762,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
-static bool g4x_check_srwm(struct drm_device *dev,
+static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
@@ -796,15 +790,16 @@ static bool g4x_check_srwm(struct drm_device *dev,
return true;
}
-static bool g4x_compute_srwm(struct drm_device *dev,
+static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
+ const struct drm_framebuffer *fb;
int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
@@ -816,12 +811,13 @@ static bool g4x_compute_srwm(struct drm_device *dev,
return false;
}
- crtc = intel_get_crtc_for_plane(dev, plane);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ crtc = intel_get_crtc_for_plane(dev_priv, plane);
+ adjusted_mode = &crtc->config->base.adjusted_mode;
+ fb = crtc->base.primary->state->fb;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ hdisplay = crtc->config->pipe_src_w;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -835,11 +831,11 @@ static bool g4x_compute_srwm(struct drm_device *dev,
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * cpp * crtc->cursor->state->crtc_w;
+ entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
- return g4x_check_srwm(dev,
+ return g4x_check_srwm(dev_priv,
*display_wm, *cursor_wm,
display, cursor);
}
@@ -939,10 +935,8 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_device *dev)
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1329,20 +1323,19 @@ static void vlv_merge_wm(struct drm_device *dev,
}
}
-static void vlv_update_wm(struct drm_crtc *crtc)
+static void vlv_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
struct vlv_wm_values wm = {};
- vlv_compute_wm(intel_crtc);
+ vlv_compute_wm(crtc);
vlv_merge_wm(dev, &wm);
if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
return;
}
@@ -1358,9 +1351,9 @@ static void vlv_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, false);
/* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(intel_crtc);
+ vlv_pipe_set_fifo_size(crtc);
- vlv_write_wm_values(intel_crtc, &wm);
+ vlv_write_wm_values(crtc, &wm);
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
@@ -1384,30 +1377,29 @@ static void vlv_update_wm(struct drm_crtc *crtc)
#define single_plane_enabled(mask) is_power_of_2(mask)
-static void g4x_update_wm(struct drm_crtc *crtc)
+static void g4x_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = to_i915(dev);
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
- if (g4x_compute_wm0(dev, PIPE_A,
+ if (g4x_compute_wm0(dev_priv, PIPE_A,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
- if (g4x_compute_wm0(dev, PIPE_B,
+ if (g4x_compute_wm0(dev_priv, PIPE_B,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
+ g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
@@ -1442,25 +1434,27 @@ static void g4x_update_wm(struct drm_crtc *crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i965_update_wm(struct drm_crtc *unused_crtc)
+static void i965_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ int hdisplay = crtc->config->pipe_src_w;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
@@ -1478,7 +1472,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * crtc->cursor->state->crtc_w;
+ cpp * crtc->base.cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1516,34 +1510,38 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
#undef FW_WM
-static void i9xx_update_wm(struct drm_crtc *unused_crtc)
+static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
+ struct intel_crtc *crtc, *enabled = NULL;
- if (IS_I945GM(dev))
+ if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
else if (!IS_GEN2(dev_priv))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
+ crtc = intel_get_crtc_for_plane(dev_priv, 0);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1557,15 +1555,20 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
+ crtc = intel_get_crtc_for_plane(dev_priv, 1);
if (intel_crtc_active(crtc)) {
- const struct drm_display_mode *adjusted_mode;
- int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
if (IS_GEN2(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
@@ -1584,7 +1587,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I915GM(dev_priv) && enabled) {
struct drm_i915_gem_object *obj;
- obj = intel_fb_obj(enabled->primary->state->fb);
+ obj = intel_fb_obj(enabled->base.primary->state->fb);
/* self-refresh seems busted with untiled */
if (!i915_gem_object_is_tiled(obj))
@@ -1600,19 +1603,24 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for single-plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
+ if (HAS_FW_BLC(dev_priv) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &enabled->config->base.adjusted_mode;
+ const struct drm_framebuffer *fb =
+ enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
- int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
+ int hdisplay = enabled->config->pipe_src_w;
+ int cpp;
unsigned long line_time_us;
int entries;
if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
+ else
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1649,23 +1657,22 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i845_update_wm(struct drm_crtc *unused_crtc)
+static void i845_update_wm(struct intel_crtc *unused_crtc)
{
- struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
+ struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev);
+ crtc = single_enabled_crtc(dev_priv);
if (crtc == NULL)
return;
- adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
+ dev_priv->display.get_fifo_size(dev_priv, 0),
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -2078,10 +2085,9 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
PIPE_WM_LINETIME_TIME(linetime);
}
-static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[8])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (IS_GEN9(dev_priv)) {
uint32_t val;
int ret, i;
@@ -2167,14 +2173,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
wm[2] = (sskpd >> 12) & 0xFF;
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
uint32_t sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
- } else if (INTEL_INFO(dev)->gen >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5) {
uint32_t mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2262,9 +2268,8 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return true;
}
-static void snb_wm_latency_quirk(struct drm_device *dev)
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool changed;
/*
@@ -2284,11 +2289,9 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_device *dev)
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
@@ -2303,14 +2306,12 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
if (IS_GEN6(dev_priv))
- snb_wm_latency_quirk(dev);
+ snb_wm_latency_quirk(dev_priv);
}
-static void skl_setup_wm_latency(struct drm_device *dev)
+static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
+ intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
@@ -3041,7 +3042,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/* Since we're now guaranteed to only have one active CRTC... */
pipe = ffs(intel_state->active_crtcs) - 1;
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
cstate = to_intel_crtc_state(crtc->base.state);
if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -3160,7 +3161,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_plane(dev_priv, pipe, plane) {
+ for_each_universal_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
@@ -3262,49 +3263,39 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ unsigned *plane_data_rate,
+ unsigned *plane_y_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_plane *plane;
+ struct drm_plane *plane;
const struct intel_plane *intel_plane;
- struct drm_plane_state *pstate;
+ const struct drm_plane_state *pstate;
unsigned int rate, total_data_rate = 0;
int id;
- int i;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- for_each_plane_in_state(state, plane, pstate, i) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
id = skl_wm_plane_id(to_intel_plane(plane));
intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe != intel_crtc->pipe)
- continue;
-
/* packed/uv */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
- intel_cstate->wm.skl.plane_data_rate[id] = rate;
+ plane_data_rate[id] = rate;
+
+ total_data_rate += rate;
/* y-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
- }
+ plane_y_data_rate[id] = rate;
- /* Calculate CRTC's total data rate from cached values */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- int id = skl_wm_plane_id(intel_plane);
-
- /* packed/uv */
- total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
- total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
+ total_data_rate += rate;
}
return total_data_rate;
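A quick check of the overflow bound quoted above this function (my arithmetic, not part of the patch): with three maximally sized planes,

	3 * 4096 * 8192 * 4 = 402,653,184 < 2^32 = 4,294,967,296

so the unsigned int accumulator cannot overflow.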
@@ -3373,6 +3364,30 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}
+static void
+skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
+ uint16_t *minimum, uint16_t *y_minimum)
+{
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int id = skl_wm_plane_id(intel_plane);
+
+ if (id == PLANE_CURSOR)
+ continue;
+
+ if (!pstate->visible)
+ continue;
+
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
+
+ minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+}
+
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -3381,17 +3396,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- uint16_t alloc_size, start, cursor_blocks;
- uint16_t *minimum = cstate->wm.skl.minimum_blocks;
- uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
+ uint16_t alloc_size, start;
+ uint16_t minimum[I915_MAX_PLANES] = {};
+ uint16_t y_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
int num_active;
int id, i;
+ unsigned plane_data_rate[I915_MAX_PLANES] = {};
+ unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
/* Clear the partitioning for disabled planes. */
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
@@ -3412,57 +3426,43 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- cursor_blocks = skl_cursor_allocation(num_active);
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
-
- alloc_size -= cursor_blocks;
-
- /* 1. Allocate the mininum required blocks for each active plane */
- for_each_plane_in_state(state, plane, pstate, i) {
- intel_plane = to_intel_plane(plane);
- id = skl_wm_plane_id(intel_plane);
-
- if (intel_plane->pipe != pipe)
- continue;
-
- if (!to_intel_plane_state(pstate)->base.visible) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- minimum[id] = 0;
- y_minimum[id] = 0;
- continue;
- }
+ skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
- minimum[id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
- }
+ /*
+ * 1. Allocate the minimum required blocks for each active plane,
+ * and allocate the cursor; it doesn't require an extra allocation
+ * proportional to the data rate.
+ */
- for (i = 0; i < PLANE_CURSOR; i++) {
+ for (i = 0; i < I915_MAX_PLANES; i++) {
alloc_size -= minimum[i];
alloc_size -= y_minimum[i];
}
+ ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+
/*
* 2. Distribute the remaining space in proportion to the amount of
* data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
- total_data_rate = skl_get_total_relative_data_rate(cstate);
+ total_data_rate = skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ plane_y_data_rate);
if (total_data_rate == 0)
return 0;
start = alloc->start;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ for (id = 0; id < I915_MAX_PLANES; id++) {
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
- int id = skl_wm_plane_id(intel_plane);
- data_rate = cstate->wm.skl.plane_data_rate[id];
+ if (id == PLANE_CURSOR)
+ continue;
+
+ data_rate = plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3484,7 +3484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/*
* allocation for y_plane part of planar format:
*/
- y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+ y_data_rate = plane_y_data_rate[id];
y_plane_blocks = y_minimum[id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
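Both allocation steps above implement the same minimum-plus-proportional-share rule; a minimal C sketch of the per-plane computation, reusing the local names from this function (illustration only, not a kernel helper):

	static uint16_t skl_plane_share(uint16_t minimum, uint16_t alloc_size,
					unsigned int rate,
					unsigned int total_data_rate)
	{
		/* 64-bit intermediate: alloc_size * rate may exceed 32 bits. */
		return minimum + div_u64((uint64_t)alloc_size * rate,
					 total_data_rate);
	}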
@@ -3930,11 +3930,11 @@ bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
- struct skl_ddb_allocation *ddb, /* out */
+ const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
+ struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
@@ -3942,7 +3942,7 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
if (ret)
return ret;
- if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
+ if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
*changed = false;
else
*changed = true;
@@ -3981,7 +3981,7 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
- drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
id = skl_wm_plane_id(to_intel_plane(plane));
if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
@@ -4096,45 +4096,31 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
to_intel_atomic_state(state);
const struct drm_crtc *crtc;
const struct drm_crtc_state *cstate;
- const struct drm_plane *plane;
const struct intel_plane *intel_plane;
- const struct drm_plane_state *pstate;
const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- enum pipe pipe;
int id;
- int i, j;
+ int i;
for_each_crtc_in_state(state, crtc, cstate, i) {
- pipe = to_intel_crtc(crtc)->pipe;
+ const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
- for_each_plane_in_state(state, plane, pstate, j) {
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
const struct skl_ddb_entry *old, *new;
- intel_plane = to_intel_plane(plane);
id = skl_wm_plane_id(intel_plane);
old = &old_ddb->plane[pipe][id];
new = &new_ddb->plane[pipe][id];
- if (intel_plane->pipe != pipe)
- continue;
-
if (skl_ddb_entry_equal(old, new))
continue;
- if (id != PLANE_CURSOR) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:plane %d%c] ddb (%d - %d) -> (%d - %d)\n",
- plane->base.id, id + 1,
- pipe_name(pipe),
- old->start, old->end,
- new->start, new->end);
- } else {
- DRM_DEBUG_ATOMIC("[PLANE:%d:cursor %c] ddb (%d - %d) -> (%d - %d)\n",
- plane->base.id,
- pipe_name(pipe),
- old->start, old->end,
- new->start, new->end);
- }
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ intel_plane->base.base.id,
+ intel_plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
}
}
}
@@ -4183,10 +4169,12 @@ skl_compute_wm(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(cstate);
+ const struct skl_pipe_wm *old_pipe_wm =
+ &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
- &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
+ &results->ddb, &changed);
if (ret)
return ret;
@@ -4205,22 +4193,19 @@ skl_compute_wm(struct drm_atomic_state *state)
return 0;
}
-static void skl_update_wm(struct drm_crtc *crtc)
+static void skl_update_wm(struct intel_crtc *intel_crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(intel_crtc->base.state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
enum pipe pipe = intel_crtc->pipe;
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
- intel_crtc->wm.active.skl = *pipe_wm;
-
mutex_lock(&dev_priv->wm.wm_mutex);
/*
@@ -4229,10 +4214,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
* the pipe's shut off, just do so here. Already active pipes will have
* their watermarks updated once we update their planes.
*/
- if (crtc->state->active_changed) {
+ if (intel_crtc->base.state->active_changed) {
int plane;
- for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+ for_each_universal_plane(dev_priv, pipe, plane)
skl_write_plane_wm(intel_crtc, &pipe_wm->planes[plane],
&results->ddb, plane);
@@ -4388,10 +4373,8 @@ void skl_wm_get_hw_state(struct drm_device *dev)
skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
- if (intel_crtc->active) {
+ if (intel_crtc->active)
hw->dirty_pipes |= drm_crtc_mask(crtc);
- intel_crtc->wm.active.skl = cstate->wm.skl.optimal;
- }
}
if (dev_priv->active_crtcs) {
@@ -4553,11 +4536,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
plane->wm.fifo_size = 63;
break;
case DRM_PLANE_TYPE_PRIMARY:
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
- plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+ plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
break;
}
}
@@ -4670,9 +4653,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_crtc *crtc)
+void intel_update_watermarks(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
@@ -5878,7 +5861,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
+ i915_gem_object_put(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
}
@@ -6862,10 +6845,8 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
}
}
-static void ibx_init_clock_gating(struct drm_device *dev)
+static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
@@ -6874,9 +6855,8 @@ static void ibx_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
-static void g4x_disable_trickle_feed(struct drm_device *dev)
+static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -6889,10 +6869,8 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
}
}
-static void ilk_init_lp_watermarks(struct drm_device *dev)
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6903,9 +6881,8 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
*/
}
-static void ironlake_init_clock_gating(struct drm_device *dev)
+static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
@@ -6937,7 +6914,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/*
* Based on the document from hardware guys the following bits
@@ -6972,14 +6949,13 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:ilk */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- ibx_init_clock_gating(dev);
+ ibx_init_clock_gating(dev_priv);
}
-static void cpt_init_clock_gating(struct drm_device *dev)
+static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
uint32_t val;
@@ -7014,9 +6990,8 @@ static void cpt_init_clock_gating(struct drm_device *dev)
}
}
-static void gen6_check_mch_setup(struct drm_device *dev)
+static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
@@ -7025,9 +7000,8 @@ static void gen6_check_mch_setup(struct drm_device *dev)
tmp);
}
-static void gen6_init_clock_gating(struct drm_device *dev)
+static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -7054,7 +7028,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -7115,11 +7089,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
- cpt_init_clock_gating(dev);
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -7140,10 +7114,8 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void lpt_init_clock_gating(struct drm_device *dev)
+static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
@@ -7159,10 +7131,8 @@ static void lpt_init_clock_gating(struct drm_device *dev)
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_device *dev)
+static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_LPT_LP(dev_priv)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -7194,11 +7164,9 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
-static void kabylake_init_clock_gating(struct drm_device *dev)
+static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
@@ -7215,11 +7183,9 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void skylake_init_clock_gating(struct drm_device *dev)
+static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- gen9_init_clock_gating(dev);
+ gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:skl */
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
@@ -7230,12 +7196,11 @@ static void skylake_init_clock_gating(struct drm_device *dev)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7278,14 +7243,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void haswell_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -7334,15 +7297,14 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- lpt_init_clock_gating(dev);
+ lpt_init_clock_gating(dev_priv);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t snpcr;
- ilk_init_lp_watermarks(dev);
+ ilk_init_lp_watermarks(dev_priv);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -7399,7 +7361,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -7430,15 +7392,13 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
if (!HAS_PCH_NOP(dev_priv))
- cpt_init_clock_gating(dev);
+ cpt_init_clock_gating(dev_priv);
- gen6_check_mch_setup(dev);
+ gen6_check_mch_setup(dev_priv);
}
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7517,10 +7477,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
-static void cherryview_init_clock_gating(struct drm_device *dev)
+static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7553,9 +7511,8 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
-static void g4x_init_clock_gating(struct drm_device *dev)
+static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7577,13 +7534,11 @@ static void g4x_init_clock_gating(struct drm_device *dev)
/* WaDisable_RenderCache_OperationalFlush:g4x */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
- g4x_disable_trickle_feed(dev);
+ g4x_disable_trickle_feed(dev_priv);
}
-static void crestline_init_clock_gating(struct drm_device *dev)
+static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
@@ -7596,10 +7551,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void broadwater_init_clock_gating(struct drm_device *dev)
+static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
@@ -7613,16 +7566,15 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void gen3_init_clock_gating(struct drm_device *dev)
+static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- if (IS_PINEVIEW(dev))
+ if (IS_PINEVIEW(dev_priv))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
@@ -7638,10 +7590,8 @@ static void gen3_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i85x_init_clock_gating(struct drm_device *dev)
+static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
@@ -7652,10 +7602,8 @@ static void i85x_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
-static void i830_init_clock_gating(struct drm_device *dev)
+static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(MEM_MODE,
@@ -7663,20 +7611,18 @@ static void i830_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
-void intel_init_clock_gating(struct drm_device *dev)
+void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->display.init_clock_gating(dev);
+ dev_priv->display.init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_device *dev)
+void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_LPT(to_i915(dev)))
- lpt_suspend_hw(dev);
+ if (HAS_PCH_LPT(dev_priv))
+ lpt_suspend_hw(dev_priv);
}
-static void nop_init_clock_gating(struct drm_device *dev)
+static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
@@ -7731,25 +7677,23 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_device *dev)
+void intel_init_pm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
intel_fbc_init(dev_priv);
/* For cxsr */
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev_priv))
+ i915_pineview_get_mem_freq(dev_priv);
else if (IS_GEN5(dev_priv))
- i915_ironlake_get_mem_freq(dev);
+ i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
- if (INTEL_INFO(dev)->gen >= 9) {
- skl_setup_wm_latency(dev);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev);
+ ilk_setup_wm_latency(dev_priv);
if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
@@ -7767,12 +7711,12 @@ void intel_init_pm(struct drm_device *dev)
"Disable CxSR\n");
}
} else if (IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev);
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
} else if (IS_VALLEYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev);
+ vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_PINEVIEW(dev)) {
+ } else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
@@ -7795,7 +7739,7 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_GEN2(dev_priv)) {
- if (INTEL_INFO(dev)->num_pipes == 1) {
+ if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -8043,5 +7987,4 @@ void intel_pm_setup(struct drm_device *dev)
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
- atomic_set(&dev_priv->pm.atomic_seq, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 32786ba..700e93d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -648,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
if (ret != 0)
return ret;
- ret = i915_gem_render_state_init(req);
+ ret = i915_gem_render_state_emit(req);
if (ret)
return ret;
@@ -1213,90 +1213,62 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static int gen8_rcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 8);
- if (ret)
- return ret;
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring,
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- intel_ring_emit(ring, lower_32_bits(gtt_offset));
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *out++ = lower_32_bits(gtt_offset);
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = 0;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen8_xcs_signal(struct drm_i915_gem_request *req)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, (num_rings-1) * 6);
- if (ret)
- return ret;
for_each_engine(waiter, dev_priv, id) {
u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(ring,
- (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(ring,
- lower_32_bits(gtt_offset) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, upper_32_bits(gtt_offset));
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring,
- MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(ring, 0);
+ *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *out++ = upper_32_bits(gtt_offset);
+ *out++ = req->global_seqno;
+ *out++ = (MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ *out++ = 0;
}
- intel_ring_advance(ring);
- return 0;
+ return out;
}
-static int gen6_signal(struct drm_i915_gem_request *req)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int ret, num_rings;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings;
- ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
- if (ret)
- return ret;
+ int num_rings = 0;
for_each_engine(engine, dev_priv, id) {
i915_reg_t mbox_reg;
@@ -1306,101 +1278,78 @@ static int gen6_signal(struct drm_i915_gem_request *req)
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, mbox_reg);
- intel_ring_emit(ring, req->fence.seqno);
+ *out++ = MI_LOAD_REGISTER_IMM(1);
+ *out++ = i915_mmio_reg_offset(mbox_reg);
+ *out++ = req->global_seqno;
+ num_rings++;
}
}
+ if (num_rings & 1)
+ *out++ = MI_NOOP;
- /* If num_dwords was rounded, make sure the tail pointer is correct */
- if (num_rings % 2 == 0)
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- return 0;
+ return out;
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- I915_WRITE_TAIL(request->engine,
- intel_ring_offset(request->ring, request->tail));
+ I915_WRITE_TAIL(request->engine, request->tail);
}
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ *out++ = MI_STORE_DWORD_INDEX;
+ *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *out++ = req->global_seqno;
+ *out++ = MI_USER_INTERRUPT;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, req->fence.seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
-
- req->tail = ring->tail;
-
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int i9xx_emit_breadcrumb_sz = 4;
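The _sz constant is the sizing half of the new contract: ring space for the breadcrumb is now reserved up front instead of via intel_ring_begin(), so each emitter must write exactly that many dwords. Tallying i9xx_emit_breadcrumb() above (my count, not from the patch):

	/* MI_STORE_DWORD_INDEX                  1 dword
	 * HWS index                            1 dword
	 * req->global_seqno                    1 dword
	 * MI_USER_INTERRUPT                    1 dword
	 *       total = 4 == i9xx_emit_breadcrumb_sz
	 */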
+
/**
- * gen6_sema_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
*
* @req: request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
- int ret;
-
- ret = req->engine->semaphore.signal(req);
- if (ret)
- return ret;
-
- return i9xx_emit_request(req);
+ return i9xx_emit_breadcrumb(req,
+ req->engine->semaphore.signal(req, out));
}
-static int gen8_render_emit_request(struct drm_i915_gem_request *req)
+static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
+ u32 *out)
{
struct intel_engine_cs *engine = req->engine;
- struct intel_ring *ring = req->ring;
- int ret;
- if (engine->semaphore.signal) {
- ret = engine->semaphore.signal(req);
- if (ret)
- return ret;
- }
+ if (engine->semaphore.signal)
+ out = engine->semaphore.signal(req, out);
- ret = intel_ring_begin(req, 8);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ *out++ = GFX_OP_PIPE_CONTROL(6);
+ *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(ring, intel_hws_seqno_address(engine));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+ PIPE_CONTROL_QW_WRITE);
+ *out++ = intel_hws_seqno_address(engine);
+ *out++ = 0;
+ *out++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *out++ = 0;
+ *out++ = MI_USER_INTERRUPT;
+ *out++ = MI_NOOP;
- req->tail = ring->tail;
-
- return 0;
+ req->tail = intel_ring_offset(req->ring, out);
}
+static const int gen8_render_emit_breadcrumb_sz = 8;
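The same tally for the render breadcrumb (my count): the six-dword PIPE_CONTROL (header, flags, address low/high, then the qword seqno write whose upper half thrashes the neighbouring HWS dword) plus MI_USER_INTERRUPT and MI_NOOP:

	/* GFX_OP_PIPE_CONTROL(6)               6 dwords
	 * MI_USER_INTERRUPT + MI_NOOP          2 dwords
	 *       total = 8 == gen8_render_emit_breadcrumb_sz
	 */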
+
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
@@ -1427,7 +1376,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->fence.seqno);
+ intel_ring_emit(ring, signal->global_seqno);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_advance(ring);
@@ -1465,7 +1414,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->fence.seqno - 1);
+ intel_ring_emit(ring, signal->global_seqno - 1);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1608,7 +1557,7 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1617,7 +1566,7 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
@@ -1762,14 +1711,19 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_gem_object_unpin_map(vma->obj);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -1777,9 +1731,10 @@ static int init_status_page(struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags;
+ void *vaddr;
int ret;
- obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1812,15 +1767,22 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr =
- i915_gem_object_pin_map(obj, I915_MAP_WB);
+ engine->status_page.page_addr = memset(vaddr, 0, 4096);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
return 0;
+err_unpin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ret;
@@ -1967,7 +1929,11 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
void
intel_ring_free(struct intel_ring *ring)
{
- i915_vma_put(ring->vma);
+ struct drm_i915_gem_object *obj = ring->vma->obj;
+
+ i915_vma_close(ring->vma);
+ __i915_gem_object_release_unless_active(obj);
+
kfree(ring);
}
@@ -1983,14 +1949,13 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
- if (ret)
- goto error;
+ struct i915_vma *vma;
- ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
- PIN_GLOBAL | PIN_HIGH);
- if (ret)
+ vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto error;
+ }
}
/* The kernel context is only used as a placeholder for flushing the
@@ -2037,9 +2002,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
intel_engine_setup_common(engine);
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
-
ret = intel_engine_init_common(engine);
if (ret)
goto error;
@@ -2155,7 +2117,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
- int ret;
+ long timeout;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
intel_ring_update_space(ring);
if (ring->space >= bytes)
@@ -2185,11 +2149,11 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- ret = i915_wait_request(target,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NO_WAITBOOST);
- if (ret)
- return ret;
+ timeout = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
i915_gem_request_retire_upto(target);
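The new return convention (inferred from this hunk): i915_wait_request() now returns the remaining timeout in jiffies on success and a negative errno on failure, so callers test the sign rather than any non-zero value:

	long timeout;

	timeout = i915_wait_request(rq, flags, MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;		/* e.g. -EINTR or -EIO */
	/* success: timeout is the number of jiffies remaining (>= 0) */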
@@ -2618,9 +2582,22 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
- engine->emit_request = i9xx_emit_request;
- if (i915.semaphores)
- engine->emit_request = gen6_sema_emit_request;
+ engine->emit_breadcrumb = i9xx_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
+ if (i915.semaphores) {
+ int num_rings;
+
+ engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
+
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ if (INTEL_GEN(dev_priv) >= 8) {
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ } else {
+ engine->emit_breadcrumb_sz += num_rings * 3;
+ if (num_rings & 1)
+ engine->emit_breadcrumb_sz++;
+ }
+ }
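The added sizing mirrors the signal emitters rewritten earlier in this patch: gen6_signal() writes three dwords per peer mailbox plus one MI_NOOP of padding when the count is odd, while the gen8 variant writes six per peer. A worked example for a hypothetical part with four engines (num_rings = 3):

	/* gen6: 4 (i9xx breadcrumb) + 3 * 3 + 1 (odd pad) = 14 dwords
	 * gen8: 4 (i9xx breadcrumb) + 3 * 6               = 22 dwords
	 */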
engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
@@ -2647,10 +2624,18 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->emit_request = gen8_render_emit_request;
+ engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
- if (i915.semaphores)
+ if (i915.semaphores) {
+ int num_rings;
+
engine->semaphore.signal = gen8_rcs_signal;
+
+ num_rings =
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ engine->emit_breadcrumb_sz += num_rings * 6;
+ }
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 32b2e63..642b546 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -4,6 +4,7 @@
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
+#include "i915_gem_timeline.h"
#define I915_CMD_HASH_ORDER 9
@@ -157,6 +158,7 @@ struct i915_ctx_workarounds {
};
struct drm_i915_gem_request;
+struct intel_render_state;
struct intel_engine_cs {
struct drm_i915_private *i915;
@@ -168,7 +170,6 @@ struct intel_engine_cs {
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
enum intel_engine_hw_id {
@@ -179,10 +180,12 @@ struct intel_engine_cs {
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
- u64 fence_context;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
+ struct intel_timeline *timeline;
+
+ struct intel_render_state *render_state;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -204,7 +207,7 @@ struct intel_engine_cs {
struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
- spinlock_t lock; /* protects the lists of requests */
+ spinlock_t lock; /* protects the lists of requests; irqsafe */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct intel_wait *first_wait; /* oldest waiter by retirement */
@@ -252,7 +255,9 @@ struct intel_engine_cs {
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- int (*emit_request)(struct drm_i915_gem_request *req);
+ void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
+ u32 *out);
+ int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
@@ -309,8 +314,6 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- u32 sync_seqno[I915_NUM_ENGINES-1];
-
union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
@@ -327,7 +330,7 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
- int (*signal)(struct drm_i915_gem_request *req);
+ u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
} semaphore;
/* Execlists */
@@ -343,27 +346,6 @@ struct intel_engine_cs {
bool preempt_wa;
u32 ctx_desc_template;
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * Seqno of request most recently submitted to request_list.
- * Used exclusively by hang checker to avoid grabbing lock while
- * inspecting request list.
- */
- u32 last_submitted_seqno;
- u32 last_pending_seqno;
-
- /* An RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_gem_active_get_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_gem_active last_request;
-
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
@@ -401,27 +383,6 @@ intel_engine_flag(const struct intel_engine_cs *engine)
return 1 << engine->id;
}
-static inline u32
-intel_engine_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
-{
- int idx;
-
- /*
- * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
- * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
- * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
- * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
- * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
- */
-
- idx = (other->id - engine->id) - 1;
- if (idx < 0)
- idx += I915_NUM_ENGINES;
-
- return idx;
-}
-
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
@@ -504,30 +465,23 @@ static inline void intel_ring_advance(struct intel_ring *ring)
*/
}
-static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- return value & (ring->size - 1);
+ u32 offset = addr - ring->vaddr;
+ return offset & (ring->size - 1);
}
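With the pointer-based form an emitter can advance a raw dword pointer through the ring's CPU mapping and fold it back into a wrapped tail in one step; an assumed caller pattern (relying on ring->size being a power of two):

	u32 *out = ring->vaddr + ring->tail;	/* start of reserved space */

	*out++ = MI_NOOP;
	ring->tail = intel_ring_offset(ring, out);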
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
-void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-static inline int intel_engine_idle(struct intel_engine_cs *engine,
- unsigned int flags)
-{
- /* Wait upon the last request to be completed */
- return i915_gem_active_wait_unlocked(&engine->last_request,
- flags, NULL, NULL);
-}
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
@@ -542,6 +496,18 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
+static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
+{
+ /* We are only peeking at the tail of the submit queue (and not the
+ * queue itself) in order to gain a hint as to the current active
+ * state of the engine. Callers are not expected to be taking
+ * engine->timeline->lock, nor are they expected to be concerned
+ * with serialising this hint with anything, so document it as
+ * a hint and nothing more.
+ */
+ return READ_ONCE(engine->timeline->last_submitted_seqno);
+}
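A hedged usage example: hangcheck-style code can compare the two unlocked hints to guess at idleness without taking any locks (ENGINE_PROBABLY_IDLE is a made-up constant for illustration):

	/* Racy by design: both reads are mere hints. */
	if (intel_engine_get_seqno(engine) ==
	    intel_engine_last_submit(engine))
		return ENGINE_PROBABLY_IDLE;	/* hypothetical */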
+
int init_workarounds_ring(struct intel_engine_cs *engine);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
@@ -615,9 +581,4 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);
-static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
-{
- return i915_gem_active_isset(&engine->last_request);
-}
-
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ee56a87..0599408 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -330,7 +330,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
- if (power_well->data == SKL_DISP_PW_2) {
+ if (power_well->id == SKL_DISP_PW_2) {
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(pdev, VGA_RSRC_LEGACY_IO);
@@ -343,7 +343,7 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (power_well->data == SKL_DISP_PW_2)
+ if (power_well->id == SKL_DISP_PW_2)
gen8_irq_power_well_pre_disable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
@@ -658,7 +658,7 @@ static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
+ enum skl_disp_power_wells power_well_id = power_well->id;
u32 val;
u32 mask;
@@ -703,7 +703,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
- switch (power_well->data) {
+ switch (power_well->id) {
case SKL_DISP_PW_1:
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
@@ -727,13 +727,13 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
case SKL_DISP_PW_MISC_IO:
break;
default:
- WARN(1, "Unknown power well %lu\n", power_well->data);
+ WARN(1, "Unknown power well %lu\n", power_well->id);
return;
}
- req_mask = SKL_POWER_WELL_REQ(power_well->data);
+ req_mask = SKL_POWER_WELL_REQ(power_well->id);
enable_requested = tmp & req_mask;
- state_mask = SKL_POWER_WELL_STATE(power_well->data);
+ state_mask = SKL_POWER_WELL_STATE(power_well->id);
is_enabled = tmp & state_mask;
if (!enable && enable_requested)
@@ -769,14 +769,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) {
- if (power_well->data == SKL_DISP_PW_1) {
+ if (power_well->id == SKL_DISP_PW_1) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
SKL_FUSE_PG1_DIST_STATUS,
1))
DRM_ERROR("PG1 distributing status timeout\n");
- } else if (power_well->data == SKL_DISP_PW_2) {
+ } else if (power_well->id == SKL_DISP_PW_2) {
if (intel_wait_for_register(dev_priv,
SKL_FUSE_STATUS,
SKL_FUSE_PG2_DIST_STATUS,
@@ -818,8 +818,8 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
- SKL_POWER_WELL_STATE(power_well->data);
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
+ SKL_POWER_WELL_STATE(power_well->id);
return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
@@ -845,45 +845,22 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
-static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
-{
- enum skl_disp_power_wells power_well_id = power_well->data;
-
- return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
-}
-
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum skl_disp_power_wells power_well_id = power_well->data;
- struct i915_power_well *cmn_a_well = NULL;
-
- if (power_well_id == BXT_DPIO_CMN_BC) {
- /*
- * We need to copy the GRC calibration value from the eDP PHY,
- * so make sure it's powered up.
- */
- cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
- intel_power_well_get(dev_priv, cmn_a_well);
- }
-
- bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
-
- if (cmn_a_well)
- intel_power_well_put(dev_priv, cmn_a_well);
+ bxt_ddi_phy_init(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_uninit(dev_priv, power_well->data);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv,
- bxt_power_well_to_phy(power_well));
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -902,13 +879,11 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- bxt_power_well_to_phy(power_well));
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -932,7 +907,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
WARN_ON(dev_priv->cdclk_freq !=
- dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+ dev_priv->display.get_display_clock_speed(dev_priv));
gen9_assert_dbuf_enabled(dev_priv);
@@ -975,7 +950,7 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- enum punit_power_well power_well_id = power_well->data;
+ enum punit_power_well power_well_id = power_well->id;
u32 mask;
u32 state;
u32 ctrl;
@@ -1029,7 +1004,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int power_well_id = power_well->data;
+ int power_well_id = power_well->id;
bool enabled = false;
u32 mask;
u32 state;
@@ -1138,13 +1113,15 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
intel_power_sequencer_reset(dev_priv);
- intel_hpd_poll_init(dev_priv);
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_set_power_well(dev_priv, power_well, true);
@@ -1154,7 +1131,7 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
vlv_display_power_well_deinit(dev_priv);
@@ -1164,7 +1141,7 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1190,7 +1167,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum pipe pipe;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
@@ -1213,7 +1190,7 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
struct i915_power_well *power_well;
power_well = &power_domains->power_wells[i];
- if (power_well->data == power_well_id)
+ if (power_well->id == power_well_id)
return power_well;
}
@@ -1337,10 +1314,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
uint32_t tmp;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
} else {
@@ -1368,7 +1345,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1399,10 +1376,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+ power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
- if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
@@ -1551,7 +1528,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
bool enabled;
u32 state, ctrl;
@@ -1581,7 +1558,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
- enum pipe pipe = power_well->data;
+ enum pipe pipe = power_well->id;
u32 state;
u32 ctrl;
@@ -1614,7 +1591,7 @@ out:
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
@@ -1622,7 +1599,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
@@ -1632,7 +1609,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->data != PIPE_A);
+ WARN_ON_ONCE(power_well->id != PIPE_A);
vlv_display_power_well_deinit(dev_priv);
@@ -1976,12 +1953,12 @@ static struct i915_power_well vlv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = PUNIT_POWER_WELL_ALWAYS_ON,
+ .id = PUNIT_POWER_WELL_ALWAYS_ON,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
+ .id = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
@@ -1991,7 +1968,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
@@ -2000,7 +1977,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
@@ -2009,7 +1986,7 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
@@ -2018,12 +1995,12 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
@@ -2043,19 +2020,19 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
- .data = PIPE_A,
+ .id = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "dpio-common-bc",
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+ .id = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
};
@@ -2078,57 +2055,57 @@ static struct i915_power_well skl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .data = SKL_DISP_PW_ALWAYS_ON,
+ .id = SKL_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO,
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_A_E,
+ .id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_B,
+ .id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_C,
+ .id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_DDI_D,
+ .id = SKL_DISP_PW_DDI_D,
},
};
@@ -2143,31 +2120,33 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "power well 1",
.domains = 0,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_1,
+ .id = SKL_DISP_PW_1,
},
{
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .data = SKL_DISP_PW_DC_OFF,
+ .id = SKL_DISP_PW_DC_OFF,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
- .data = SKL_DISP_PW_2,
+ .id = SKL_DISP_PW_2,
},
{
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_A,
+ .id = BXT_DPIO_CMN_A,
+ .data = DPIO_PHY1,
},
{
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .data = BXT_DPIO_CMN_BC,
+ .id = BXT_DPIO_CMN_BC,
+ .data = DPIO_PHY0,
},
};
@@ -2736,8 +2715,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
- atomic_inc(&dev_priv->pm.atomic_seq);
+ atomic_dec(&dev_priv->pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
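The intel_runtime_pm.c changes above rely on the power-well descriptor split introduced in this series: the former multi-purpose ->data field becomes a stable ->id (the platform's power well enum), while ->data is left as platform-private storage, which on Broxton now carries the DPIO PHY directly and replaces the bxt_power_well_to_phy() mapping. A short sketch of the resulting pattern, under those assumptions (bxt_phy_for_well() is illustrative, not part of the patch):

	/* Look a well up by its stable id, then use its private payload. */
	static enum dpio_phy bxt_phy_for_well(struct drm_i915_private *dev_priv,
					      enum skl_disp_power_wells id)
	{
		struct i915_power_well *well = lookup_power_well(dev_priv, id);

		/* The BXT_DPIO_CMN_A/BC descriptors initialise ->data to a PHY. */
		return well->data;
	}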
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49fb95d..3990c80 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1472,7 +1472,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1509,7 +1509,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_write_sdvox(intel_sdvo, temp);
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -2411,10 +2411,10 @@ static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
- struct drm_device *dev = connector->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev);
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 43d0350..df0fbb4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1042,10 +1042,10 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-int
-intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = NULL;
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
@@ -1054,9 +1054,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
int num_plane_formats;
int ret;
- if (INTEL_INFO(dev)->gen < 5)
- return -ENODEV;
-
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane) {
ret = -ENOMEM;
@@ -1070,25 +1067,25 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
intel_plane->base.state = &state->base;
- switch (INTEL_INFO(dev)->gen) {
- case 5:
- case 6:
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
- intel_plane->max_downscale = 16;
- intel_plane->update_plane = ilk_update_plane;
- intel_plane->disable_plane = ilk_disable_plane;
+ state->scaler_id = -1;
- if (IS_GEN6(dev_priv)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
- } else {
- plane_formats = ilk_plane_formats;
- num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
- }
- break;
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
- case 7:
- case 8:
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
+
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
@@ -1097,33 +1094,25 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->max_downscale = 1;
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
- } else {
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
- break;
- case 9:
- intel_plane->can_scale = true;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- state->scaler_id = -1;
-
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- break;
- default:
- MISSING_CASE(INTEL_INFO(dev)->gen);
- ret = -ENODEV;
- goto fail;
}
if (INTEL_GEN(dev_priv) >= 9) {
@@ -1142,15 +1131,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- if (INTEL_INFO(dev)->gen >= 9)
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ if (INTEL_GEN(dev_priv) >= 9)
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
+ ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
+ possible_crtcs, &intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, plane));
@@ -1163,11 +1152,11 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
- return 0;
+ return intel_plane;
fail:
kfree(state);
kfree(intel_plane);
- return ret;
+ return ERR_PTR(ret);
}
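intel_sprite_plane_create() now hands back the plane itself and reports failure via ERR_PTR(), so the old gen < 5 guard and errno plumbing move to the call site. A hedged sketch of the expected calling convention (the surrounding initialisation code is assumed and not shown in this hunk):

	struct intel_plane *plane;

	plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
	if (IS_ERR(plane))
		/* Propagate or log; PTR_ERR() recovers the errno. */
		DRM_DEBUG_KMS("pipe %c sprite %d init failed: %ld\n",
			      pipe_name(pipe), sprite, PTR_ERR(plane));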
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 7118fb55..9212f00 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -856,7 +856,7 @@ intel_enable_tv(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
- intel_wait_for_vblank(encoder->base.dev,
+ intel_wait_for_vblank(dev_priv,
to_intel_crtc(encoder->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
@@ -1238,7 +1238,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1268,7 +1268,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
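As in the sdvo and sprite hunks, the intel_tv.c conversions above follow the series-wide switch of intel_wait_for_vblank() from taking a struct drm_device to taking struct drm_i915_private. The mechanical pattern, shown as an illustrative before/after rather than an additional change in this patch:

	/* Before: the helper took the drm_device wrapper. */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	/* After: pass the driver-private; derive it with to_i915() if needed. */
	intel_wait_for_vblank(to_i915(dev), intel_crtc->pipe);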