Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 40
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 60
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c | 445
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 129
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 69
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 4
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v3_1.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 34
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 2
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 35
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 26
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 254
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 165
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 30
95 files changed, 1836 insertions, 587 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fb7cf0e..0a1e4a5 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2674,7 +2674,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
int modes = 0;
u8 cea_mode;
- if (video_db == NULL || video_index > video_len)
+ if (video_db == NULL || video_index >= video_len)
return 0;
/* CEA modes are numbered 1..127 */
@@ -2701,7 +2701,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
if (structure & (1 << 8)) {
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (newmode) {
- newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+ newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
modes++;
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1a35ea5..c22c309 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
int drm_sysfs_device_add(struct drm_minor *minor)
{
char *minor_str;
+ int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
@@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
- minor->kdev = device_create(drm_class, minor->dev->dev,
- MKDEV(DRM_MAJOR, minor->index),
- minor, minor_str, minor->index);
- if (IS_ERR(minor->kdev)) {
- DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
- return PTR_ERR(minor->kdev);
+ minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
+ if (!minor->kdev) {
+ r = -ENOMEM;
+ goto error;
}
+
+ device_initialize(minor->kdev);
+ minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ minor->kdev->class = drm_class;
+ minor->kdev->type = &drm_sysfs_device_minor;
+ minor->kdev->parent = minor->dev->dev;
+ minor->kdev->release = drm_sysfs_release;
+ dev_set_drvdata(minor->kdev, minor);
+
+ r = dev_set_name(minor->kdev, minor_str, minor->index);
+ if (r < 0)
+ goto error;
+
+ r = device_add(minor->kdev);
+ if (r < 0)
+ goto error;
+
return 0;
+
+error:
+ DRM_ERROR("device create failed %d\n", r);
+ put_device(minor->kdev);
+ return r;
}
/**
@@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
void drm_sysfs_device_remove(struct drm_minor *minor)
{
if (minor->kdev)
- device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+ device_unregister(minor->kdev);
minor->kdev = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b676006..22b8f5e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
+ exynos_drm_subdrv_close(dev, file);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
struct exynos_drm_private *private = dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
+ struct drm_pending_vblank_event *v, *vt;
+ struct drm_pending_event *e, *et;
unsigned long flags;
- /* release events of current file */
+ if (!file->driver_priv)
+ return;
+
+ /* Release all events not unhandled by page flip handler. */
spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry_safe(e, t, &private->pageflip_event_list,
+ list_for_each_entry_safe(v, vt, &private->pageflip_event_list,
base.link) {
- if (e->base.file_priv == file) {
- list_del(&e->base.link);
- e->base.destroy(&e->base);
+ if (v->base.file_priv == file) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
}
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
- exynos_drm_subdrv_close(dev, file);
-}
+ /* Release all events handled by page flip handler but not freed. */
+ list_for_each_entry_safe(e, et, &file->event_list, link) {
+ list_del(&e->link);
+ e->destroy(e);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
-static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
-{
- if (!file->driver_priv)
- return;
kfree(file->driver_priv);
file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 23da72b..a61878b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -31,7 +31,7 @@
#include "exynos_drm_iommu.h"
/*
- * FIMD is stand for Fully Interactive Mobile Display and
+ * FIMD stands for Fully Interactive Mobile Display and
* as a display controller, it transfers contents drawn on memory
* to a LCD Panel through Display Interfaces such as RGB or
* CPU Interface.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3271fd4..7bccedc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -383,6 +383,8 @@ out:
g2d_userptr->npages,
g2d_userptr->vma);
+ exynos_gem_put_vma(g2d_userptr->vma);
+
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12..2e367a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
*/
+ mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
+ mutex_unlock(&dev->mode_config.mutex);
intel_modeset_suspend_hw(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8600c31..ccdbecc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1816,6 +1816,7 @@ struct drm_i915_file_private {
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5e..621c7c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
- if (IS_HSW_GT3(dev))
- I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
- else
- I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+ if (IS_HASWELL(dev))
+ I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752f..9bb533e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
ret = i915_gem_object_get_pages(obj);
if (ret)
- goto error;
+ goto err;
+
+ i915_gem_object_pin_pages(obj);
ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
- goto error;
+ goto err_unpin;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
- goto error;
+ goto err_unpin;
obj->vmapping_count = 1;
- i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
-error:
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595..b7e787f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
struct eb_vmas {
struct list_head vmas;
int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
}
}
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return;
+
+ entry = vma->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
struct i915_vma,
exec_list);
list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
return ret;
}
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
static int
need_reloc_mappable(struct i915_vma *vma)
{
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry;
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (!drm_mm_node_allocated(&vma->node))
- return;
-
- entry = vma->exec_entry;
-
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
- i915_gem_object_unpin_fence(obj);
-
- if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- i915_gem_object_unpin(obj);
-
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
goto err;
}
-err: /* Decrement pin count for bound objects */
- list_for_each_entry(vma, vmas, exec_list)
- i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
if (ret != -ENOSPC || retry++)
return ret;
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
+
ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
while (!list_empty(&eb->vmas)) {
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
+ i915_gem_execbuffer_unreserve_vma(vma);
drm_gem_object_unreference(&vma->obj->base);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b..38cb8d4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE0;
+ pte |= HSW_WT_ELLC_LLC_AGE3;
break;
default:
- pte |= HSW_WB_ELLC_LLC_AGE0;
+ pte |= HSW_WB_ELLC_LLC_AGE3;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6..ee27421 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 43959ed..dfff090 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
acpi_handle dhandle;
int ret;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6dd622d..e4fba39 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
- dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ /*
+ * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+ * clock for LVDS.
+ */
+ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
+ !HAS_PCH_SPLIT(dev));
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1591576..526c8de 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations = ddi_translations_dp;
break;
case PORT_D:
- if (intel_dpd_is_edp(dev))
+ if (intel_dp_is_edp(dev, PORT_D))
ddi_translations = ddi_translations_edp;
else
ddi_translations = ddi_translations_dp;
@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
- if (type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
}
@@ -1406,6 +1407,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
default:
break;
}
+
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+ dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ }
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cddd50..080f6fd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
uint16_t postoff = 0;
if (intel_crtc->config.limited_color_range)
- postoff = (16 * (1 << 13) / 255) & 0x1fff;
+ postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
- dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
}
void hsw_enable_pc8_work(struct work_struct *__work)
@@ -6518,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6525,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@@ -6562,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
if (!i915_enable_pc8)
return;
@@ -6585,18 +6594,28 @@ done:
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
- hsw_enable_package_c8(dev_priv);
+ __hsw_enable_package_c8(dev_priv);
}
+ mutex_unlock(&dev_priv->pc8.lock);
}
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PC8(dev_priv->dev))
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
- hsw_disable_package_c8(dev_priv);
+ __hsw_disable_package_c8(dev_priv);
}
+ mutex_unlock(&dev_priv->pc8.lock);
}
#define for_each_power_domain(domain, mask) \
@@ -7184,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
+ POSTING_READ(CURBASE(pipe));
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -7213,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
+ POSTING_READ(CURCNTR_IVB(pipe));
I915_WRITE(CURBASE_IVB(pipe), base);
+ POSTING_READ(CURBASE_IVB(pipe));
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8331,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+ MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
}
@@ -9248,8 +9272,7 @@ check_crtc_state(struct drm_device *dev)
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config &&
- encoder->get_hw_state(encoder, &pipe))
+ if (encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@@ -10027,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
- dpd_is_edp = intel_dpd_is_edp(dev);
+ dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
@@ -10064,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
- PORT_C);
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
intel_dsi_init(dev);
@@ -10909,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
- if (encoder->get_config)
- encoder->get_config(encoder, &crtc->config);
+ encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index eb8139d..30c627c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1774,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
}
/* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child;
int i;
+ static const short port_mapping[] = {
+ [PORT_B] = PORT_IDPB,
+ [PORT_C] = PORT_IDPC,
+ [PORT_D] = PORT_IDPD,
+ };
+
+ if (port == PORT_A)
+ return true;
if (!dev_priv->vbt.child_dev_num)
return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
- if (p_child->common.dvo_port == PORT_IDPD &&
+ if (p_child->common.dvo_port == port_mapping[port] &&
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- type = DRM_MODE_CONNECTOR_DisplayPort;
- /*
- * FIXME : We need to initialize built-in panels before external panels.
- * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
- */
- switch (port) {
- case PORT_A:
+ if (intel_dp_is_edp(dev, port))
type = DRM_MODE_CONNECTOR_eDP;
- break;
- case PORT_C:
- if (IS_VALLEYVIEW(dev))
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- case PORT_D:
- if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- default: /* silence GCC warning */
- break;
- }
+ else
+ type = DRM_MODE_CONNECTOR_DisplayPort;
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8..a18e88b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_on(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 1b2f41c..6d69a9b 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp;
int i = 0;
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0a07d7c..6e0d5e0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(crtc)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
- int htotal = adjusted_mode->htotal;
+ int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
@@ -1624,8 +1624,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
- int htotal = adjusted_mode->htotal;
- int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
+ int htotal = adjusted_mode->crtc_htotal;
+ int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->htotal;
+ htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
/* The WM are computed with base on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
* */
- linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
- ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+ linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+ mode->crtc_clock);
+ ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv));
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -3888,7 +3889,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 18c4062..22cf0f4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
}
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->get_config = intel_tv_get_config;
intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f9883ce..0b02078 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -217,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -234,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
-}
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
+ intel_uncore_forcewake_reset(dev);
}
void intel_uncore_sanitize(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index edcf801..b3fa1ba 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o
nouveau-y += core/subdev/clock/nv50.o
nouveau-y += core/subdev/clock/nv84.o
nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvaa.o
nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/nve0.o
nouveau-y += core/subdev/clock/pllnv04.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index db13982..db3fc7b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
@@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass;
- device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 5f55578..e6352bd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -33,6 +33,7 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+#include "nv04.h"
#include "nv50.h"
/*******************************************************************************
@@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv50_fifo_cclass;
nv_engine(priv)->sclass = nv50_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 0908dc8..fe0f41e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -35,6 +35,7 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+#include "nv04.h"
#include "nv50.h"
/*******************************************************************************
@@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv84_fifo_cclass;
nv_engine(priv)->sclass = nv84_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b574dd4..5ce686e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- chan->vblank.nr_event = pdisp->vblank->index_nr;
+ chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
chan->vblank.event = kzalloc(chan->vblank.nr_event *
sizeof(*chan->vblank.event), GFP_KERNEL);
if (!chan->vblank.event)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index e2675bc..8f4ced7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -14,6 +14,9 @@ enum nv_clk_src {
nv_clk_src_hclk,
nv_clk_src_hclkm3,
nv_clk_src_hclkm3d2,
+ nv_clk_src_hclkm2d3, /* NVAA */
+ nv_clk_src_hclkm4, /* NVAA */
+ nv_clk_src_cclk, /* NVAA */
nv_clk_src_host,
@@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass;
extern struct nouveau_oclass *nv50_clock_oclass;
extern struct nouveau_oclass *nv84_clock_oclass;
+extern struct nouveau_oclass *nvaa_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass;
extern struct nouveau_oclass nve0_clock_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index da50c1b..30c1f3a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
return 0;
}
+static struct nouveau_clocks
+nv04_domain[] = {
+ { nv_clk_src_max }
+};
+
static int
nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
new file mode 100644
index 0000000..7a723b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/fifo.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/clock.h>
+
+#include "pll.h"
+
+struct nvaa_clock_priv {
+ struct nouveau_clock base;
+ enum nv_clk_src csrc, ssrc, vsrc;
+ u32 cctrl, sctrl;
+ u32 ccoef, scoef;
+ u32 cpost, spost;
+ u32 vdiv;
+};
+
+static u32
+read_div(struct nouveau_clock *clk)
+{
+ return nv_rd32(clk, 0x004600);
+}
+
+static u32
+read_pll(struct nouveau_clock *clk, u32 base)
+{
+ u32 ctrl = nv_rd32(clk, base + 0);
+ u32 coef = nv_rd32(clk, base + 4);
+ u32 ref = clk->read(clk, nv_clk_src_href);
+ u32 post_div = 0;
+ u32 clock = 0;
+ int N1, M1;
+
+ switch (base){
+ case 0x4020:
+ post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+ break;
+ case 0x4028:
+ post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+ break;
+ default:
+ break;
+ }
+
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ clock = ref * N1 / M1;
+ clock = clock / post_div;
+ }
+
+ return clock;
+}
+
+static int
+nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ u32 mast = nv_rd32(clk, 0x00c054);
+ u32 P = 0;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(priv)->crystal;
+ case nv_clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case nv_clk_src_hclkm4:
+ return clk->read(clk, nv_clk_src_href) * 4;
+ case nv_clk_src_hclkm2d3:
+ return clk->read(clk, nv_clk_src_href) * 2 / 3;
+ case nv_clk_src_host:
+ switch (mast & 0x000c0000) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+ case 0x00040000: break;
+ case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
+ case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+ }
+ break;
+ case nv_clk_src_core:
+ P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+
+ switch (mast & 0x00000003) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000001: return 0;
+ case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+ case 0x00000003: return read_pll(clk, 0x004028) >> P;
+ }
+ break;
+ case nv_clk_src_cclk:
+ if ((mast & 0x03000000) != 0x03000000)
+ return clk->read(clk, nv_clk_src_core);
+
+ if ((mast & 0x00000200) == 0x00000000)
+ return clk->read(clk, nv_clk_src_core);
+
+ switch (mast & 0x00000c00) {
+ case 0x00000000: return clk->read(clk, nv_clk_src_href);
+ case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
+ case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+ default: return 0;
+ }
+ case nv_clk_src_shader:
+ P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000040)
+ return clk->read(clk, nv_clk_src_href) >> P;
+ return clk->read(clk, nv_clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(clk, 0x004028) >> P;
+ case 0x00000030: return read_pll(clk, 0x004020) >> P;
+ }
+ break;
+ case nv_clk_src_mem:
+ return 0;
+ break;
+ case nv_clk_src_vdec:
+ P = (read_div(clk) & 0x00000700) >> 8;
+
+ switch (mast & 0x00400000) {
+ case 0x00400000:
+ return clk->read(clk, nv_clk_src_core) >> P;
+ break;
+ default:
+ return 500000 >> P;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+ return 0;
+}
+
+static u32
+calc_pll(struct nvaa_clock_priv *priv, u32 reg,
+ u32 clock, int *N, int *M, int *P)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_pll pll;
+ struct nouveau_clock *clk = &priv->base;
+ int ret;
+
+ ret = nvbios_pll_parse(bios, reg, &pll);
+ if (ret)
+ return 0;
+
+ pll.vco2.max_freq = 0;
+ pll.refclk = clk->read(clk, nv_clk_src_href);
+ if (!pll.refclk)
+ return 0;
+
+ return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+}
+
+static inline u32
+calc_P(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
+ }
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
+
+static int
+nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ const int shader = cstate->domain[nv_clk_src_shader];
+ const int core = cstate->domain[nv_clk_src_core];
+ const int vdec = cstate->domain[nv_clk_src_vdec];
+ u32 out = 0, clock = 0;
+ int N, M, P1, P2 = 0;
+ int divs = 0;
+
+ /* cclk: find suitable source, disable PLL if we can */
+ if (core < clk->read(clk, nv_clk_src_hclkm4))
+ out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+
+ /* Calculate clock * 2, so shader clock can use it too */
+ clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+
+ if (abs(core - out) <=
+ abs(core - (clock >> 1))) {
+ priv->csrc = nv_clk_src_hclkm4;
+ priv->cctrl = divs << 16;
+ } else {
+ /* NVCTRL is actually used _after_ NVPOST, and after what we
+ * call NVPLL. To make matters worse, NVPOST is an integer
+ * divider instead of a right-shift number. */
+ if(P1 > 2) {
+ P2 = P1 - 2;
+ P1 = 2;
+ }
+
+ priv->csrc = nv_clk_src_core;
+ priv->ccoef = (N << 8) | M;
+
+ priv->cctrl = (P2 + 1) << 16;
+ priv->cpost = (1 << P1) << 16;
+ }
+
+ /* sclk: nvpll + divisor, href or spll */
+ out = 0;
+ if (shader == clk->read(clk, nv_clk_src_href)) {
+ priv->ssrc = nv_clk_src_href;
+ } else {
+ clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+ if (priv->csrc == nv_clk_src_core) {
+ out = calc_P((core << 1), shader, &divs);
+ }
+
+ if (abs(shader - out) <=
+ abs(shader - clock) &&
+ (divs + P2) <= 7) {
+ priv->ssrc = nv_clk_src_core;
+ priv->sctrl = (divs + P2) << 16;
+ } else {
+ priv->ssrc = nv_clk_src_shader;
+ priv->scoef = (N << 8) | M;
+ priv->sctrl = P1 << 16;
+ }
+ }
+
+ /* vclk */
+ out = calc_P(core, vdec, &divs);
+ clock = calc_P(500000, vdec, &P1);
+ if(abs(vdec - out) <=
+ abs(vdec - clock)) {
+ priv->vsrc = nv_clk_src_cclk;
+ priv->vdiv = divs << 16;
+ } else {
+ priv->vsrc = nv_clk_src_vdec;
+ priv->vdiv = P1 << 16;
+ }
+
+ /* Print strategy! */
+ nv_debug(priv, "nvpll: %08x %08x %08x\n",
+ priv->ccoef, priv->cpost, priv->cctrl);
+ nv_debug(priv, " spll: %08x %08x %08x\n",
+ priv->scoef, priv->spost, priv->sctrl);
+ nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
+ if (priv->csrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "core: hrefm4\n");
+ else
+ nv_debug(priv, "core: nvpll\n");
+
+ if (priv->ssrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "shader: hrefm4\n");
+ else if (priv->ssrc == nv_clk_src_core)
+ nv_debug(priv, "shader: nvpll\n");
+ else
+ nv_debug(priv, "shader: spll\n");
+
+ if (priv->vsrc == nv_clk_src_hclkm4)
+ nv_debug(priv, "vdec: 500MHz\n");
+ else
+ nv_debug(priv, "vdec: core\n");
+
+ return 0;
+}
+
+static int
+nvaa_clock_prog(struct nouveau_clock *clk)
+{
+ struct nvaa_clock_priv *priv = (void *)clk;
+ struct nouveau_fifo *pfifo = nouveau_fifo(clk);
+ unsigned long flags;
+ u32 pllmask = 0, mast, ptherm_gate;
+ int ret = -EBUSY;
+
+ /* halt and idle execution engines */
+ ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
+ nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+ /* Wait until the interrupt handler is finished */
+ if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+ goto resume;
+
+ if (pfifo)
+ pfifo->pause(pfifo, &flags);
+
+ if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+ goto resume;
+ if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+ goto resume;
+
+ /* First switch to safe clocks: href */
+ mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+ mast &= ~0x00400e73;
+ mast |= 0x03000000;
+
+ switch (priv->csrc) {
+ case nv_clk_src_hclkm4:
+ nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+ mast |= 0x00000002;
+ break;
+ case nv_clk_src_core:
+ nv_wr32(clk, 0x402c, priv->ccoef);
+ nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
+ nv_wr32(clk, 0x4040, priv->cpost);
+ pllmask |= (0x3 << 8);
+ mast |= 0x00000003;
+ break;
+ default:
+ nv_warn(priv,"Reclocking failed: unknown core clock\n");
+ goto resume;
+ }
+
+ switch (priv->ssrc) {
+ case nv_clk_src_href:
+ nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+ /* mast |= 0x00000000; */
+ break;
+ case nv_clk_src_core:
+ nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+ mast |= 0x00000020;
+ break;
+ case nv_clk_src_shader:
+ nv_wr32(clk, 0x4024, priv->scoef);
+ nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
+ nv_wr32(clk, 0x4070, priv->spost);
+ pllmask |= (0x3 << 12);
+ mast |= 0x00000030;
+ break;
+ default:
+ nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+ goto resume;
+ }
+
+ if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
+ nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+ goto resume;
+ }
+
+ switch (priv->vsrc) {
+ case nv_clk_src_cclk:
+ mast |= 0x00400000;
+ default:
+ nv_wr32(clk, 0x4600, priv->vdiv);
+ }
+
+ nv_wr32(clk, 0xc054, mast);
+ ret = 0;
+
+resume:
+ if (pfifo)
+ pfifo->start(pfifo, &flags);
+
+ nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
+ nv_wr32(clk, 0x020060, ptherm_gate);
+
+ /* Disable some PLLs and dividers when unused */
+ if (priv->csrc != nv_clk_src_core) {
+ nv_wr32(clk, 0x4040, 0x00000000);
+ nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+ }
+
+ if (priv->ssrc != nv_clk_src_shader) {
+ nv_wr32(clk, 0x4070, 0x00000000);
+ nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+ }
+
+ return ret;
+}
+
+static void
+nvaa_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static struct nouveau_clocks
+nvaa_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_href , 0xff },
+ { nv_clk_src_core , 0xff, 0, "core", 1000 },
+ { nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+ { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 },
+ { nv_clk_src_max }
+};
+
+static int
+nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvaa_clock_priv *priv;
+ int ret;
+
+ ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.read = nvaa_clock_read;
+ priv->base.calc = nvaa_clock_calc;
+ priv->base.prog = nvaa_clock_prog;
+ priv->base.tidy = nvaa_clock_tidy;
+ return 0;
+}
+
+struct nouveau_oclass *
+nvaa_clock_oclass = &(struct nouveau_oclass) {
+ .handle = NV_SUBDEV(CLOCK, 0xaa),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvaa_clock_ctor,
+ .dtor = _nouveau_clock_dtor,
+ .init = _nouveau_clock_init,
+ .fini = _nouveau_clock_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index e286e13..1291204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
acpi_handle handle;
int ret;
- handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
+ handle = ACPI_HANDLE(&device->pdev->dev);
if (!handle)
return false;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 3618ac6..32e7064 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -58,8 +58,8 @@ struct nouveau_plane {
};
static uint32_t formats[] = {
- DRM_FORMAT_NV12,
DRM_FORMAT_UYVY,
+ DRM_FORMAT_NV12,
};
/* Sine can be approximated with
@@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
bool flip = nv_plane->flip;
- int format = ALIGN(src_w * 4, 0x100);
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
- int ret;
+ int format, ret;
+
+ /* Source parameters given in 16.16 fixed point, ignore fractional. */
+ src_x >>= 16;
+ src_y >>= 16;
+ src_w >>= 16;
+ src_h >>= 16;
+
+ format = ALIGN(src_w * 4, 0x100);
if (format > 0xffff)
- return -EINVAL;
+ return -ERANGE;
+
+ if (dev->chipset >= 0x30) {
+ if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
+ return -ERANGE;
+ } else {
+ if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
+ return -ERANGE;
+ }
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
if (ret)
@@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- /* Source parameters given in 16.16 fixed point, ignore fractional. */
- src_x = src_x >> 16;
- src_y = src_y >> 16;
- src_w = src_w >> 16;
- src_h = src_h >> 16;
-
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
@@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device)
{
struct nouveau_device *dev = nouveau_dev(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+ int num_formats = ARRAY_SIZE(formats);
int ret;
if (!plane)
return;
+ switch (dev->chipset) {
+ case 0x10:
+ case 0x11:
+ case 0x15:
+ case 0x1a:
+ case 0x20:
+ num_formats = 1;
+ break;
+ }
+
ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
&nv10_plane_funcs,
- formats, ARRAY_SIZE(formats), false);
+ formats, num_formats, false);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 07273a2..95c7404 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
acpi_handle dhandle;
int retval = 0;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
return false;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7809d92..29c3efd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -608,6 +608,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fence = nouveau_fence_ref(new_bo->bo.sync_obj);
spin_unlock(&new_bo->bo.bdev->fence_lock);
ret = nouveau_fence_sync(fence, chan);
+ nouveau_fence_unref(&fence);
if (ret)
return ret;
@@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event)
- drm_send_vblank_event(dev, -1, s->event);
+ drm_send_vblank_event(dev, s->crtc, s->event);
list_del(&s->head);
if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 38a4db5..4aff04f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -630,7 +630,6 @@ error:
hwmon->hwmon = NULL;
return ret;
#else
- hwmon->hwmon = NULL;
return 0;
#endif
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f8e66c0..4e384a2 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
+ u32 end = min_t(u32, start + size, 256);
u32 i;
for (i = start; i < end; i++) {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0109a96..821ab7b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev,
- DRM_FILE_OFFSET);
qxl_fence_remove_release(&bo->fence, release->id);
qxl_bo_unref(&bo);
+ kfree(entry);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index deaf98c..f685035dbe 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
- u16 out;
+ u16 out = cpu_to_le16(0);
memset(&args, 0, sizeof(args));
@@ -55,9 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL;
}
- args.ucRegIndex = buf[0];
- if (num > 1)
- memcpy(&out, &buf[1], num - 1);
+ if (buf == NULL)
+ args.ucRegIndex = 0;
+ else
+ args.ucRegIndex = buf[0];
+ if (num)
+ num--;
+ if (num)
+ memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
@@ -94,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
- u8 buf = 0, flags;
+ u8 flags;
/* check for bus probe */
p = &msgs[0];
if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
- &buf, 1);
+ NULL, 0);
if (ret)
return ret;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ae92aa0..b43a3a3 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
* cik_mm_rdoorbell - read a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
*
* Returns the value in the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
- if (offset < rdev->doorbell.size) {
- return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ return readl(rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
@@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
* cik_mm_wdoorbell - write a doorbell dword
*
* @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
*/
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
- if (offset < rdev->doorbell.size) {
- writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+ if (index < rdev->doorbell.num_doorbells) {
+ writel(v, rdev->doorbell.ptr + index);
} else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
@@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 4) {
@@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 2) {
@@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
+/* TODO: figure out why semaphores cause lockups */
+#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+ return true;
+#else
+ return false;
+#endif
}
/**
@@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
- WDOORBELL32(ring->doorbell_offset, ring->wptr);
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
}
/**
@@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
return r;
}
- /* doorbell offset */
- rdev->ring[idx].doorbell_offset =
- (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
-
/* init the mqd struct */
memset(buf, 0, sizeof(struct bonaire_mqd));
@@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
mqd->queue_state.cp_hqd_pq_doorbell_control |=
- DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+ DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
mqd->queue_state.cp_hqd_pq_doorbell_control &=
~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+ r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 9c9529d..0300727 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (CIK).
*/
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
radeon_ring_write(ring, addr & 0xfffffff8);
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+ return true;
}
/**
@@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 91bb470..920e1e4 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
static int cypress_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
u32 tmp;
udelay(10);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 009f46e..de86493 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 offset = dig->afmt->offset;
+ u32 offset;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
+ offset = dig->afmt->offset;
+
WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
@@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0, offset;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
@@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- if (!dig->afmt->pin)
+ if (!dig || !dig->afmt || !dig->afmt->pin)
return;
offset = dig->afmt->pin->offset;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 6a0656d..a37b544 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f263390..49c4d48 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
- u32 mclk, sclk;
- u16 vddc, vddci;
+ u32 mclk;
+ u16 vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
@@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
/* XXX validate the min clocks required for display */
+ /* adjust low state */
if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
- vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
- } else {
- sclk = ps->performance_levels[0].sclk;
- mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
- vddci = ps->performance_levels[0].vddci;
+ ps->performance_levels[0].mclk =
+ ps->performance_levels[ps->performance_level_count - 1].mclk;
+ ps->performance_levels[0].vddci =
+ ps->performance_levels[ps->performance_level_count - 1].vddci;
}
- /* adjusted low state */
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
- ps->performance_levels[0].vddc = vddc;
- ps->performance_levels[0].vddci = vddci;
-
btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
&ps->performance_levels[0].sclk,
&ps->performance_levels[0].mclk);
@@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
}
+ /* adjust remaining states */
if (disable_mclk_switching) {
mclk = ps->performance_levels[0].mclk;
+ vddci = ps->performance_levels[0].vddci;
for (i = 1; i < ps->performance_level_count; i++) {
if (mclk < ps->performance_levels[i].mclk)
mclk = ps->performance_levels[i].mclk;
+ if (vddci < ps->performance_levels[i].vddci)
+ vddci = ps->performance_levels[i].vddci;
}
for (i = 0; i < ps->performance_level_count; i++) {
ps->performance_levels[i].mclk = mclk;
@@ -3445,9 +3439,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
static int ni_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
+#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-#if defined(CONFIG_ACPI)
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 784983d..10abc4d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
+ return false;
}
int r100_copy_blit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4e609e8..9ad0673 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+ return true;
}
/**
@@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 3b31745..7844d15 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (r6xx-SI).
*/
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+ return true;
}
/**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 4b89262..b7d3ecb 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
- } else if (ASIC_IS_DCE3(rdev)) {
+ } else {
/* according to the reg specs, this should be DCE3.2 only, but in
- * practice it seems to cover DCE3.0/3.1 as well.
+ * practice it seems to cover DCE2.0/3.0/3.1 as well.
*/
if (dig->dig_encoder == 0) {
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
@@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
- } else {
- /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
- WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
- AUDIO_DTO_MODULE(clock / 10));
}
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b9ee992..b1f990d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_locked(struct radeon_fence *fence);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter);
+ int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* GPU doorbell structures, functions & helpers
*/
+#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
struct radeon_doorbell {
- u32 num_pages;
- bool free[1024];
/* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- void __iomem *ptr;
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
+ unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
};
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@@ -765,7 +771,6 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@@ -799,8 +804,7 @@ struct radeon_ring {
u32 pipe;
u32 queue;
struct radeon_bo *mqd_obj;
- u32 doorbell_page_num;
- u32 doorbell_offset;
+ u32 doorbell_index;
unsigned wptr_offs;
};
@@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
/* command emit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1979,6 +1982,7 @@ struct cik_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
+ uint32_t macrotile_mode_array[16];
};
union radeon_asic_config {
@@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
@@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
-#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
-#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
/*
* Indirect registers accessor
@@ -2706,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem);
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 10f98c7..98a9074 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
count = radeon_atif_get_sbios_requests(handle, &req);
if (count <= 0)
@@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
struct radeon_atcs *atcs = &rdev->atcs;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
u32 retry = 3;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
if (!handle)
return -EINVAL;
@@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
int ret;
/* Get the device handle */
- handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ handle = ACPI_HANDLE(&rdev->pdev->dev);
/* No need to proceed if we're sure that ATIF is not supported */
if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 50853c0..e354ce9 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
@@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2833ee..c9fd97b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
/* uvd v3.1 */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f79ee18..5c39bf7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
mpll_param->dll_speed = args.ucDllSpeed;
mpll_param->bwcntl = args.ucBWCntl;
mpll_param->vco_mode =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0;
+ (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
mpll_param->yclk_sel =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
mpll_param->qdr =
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6153ec1..9d302ea 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -8,8 +8,7 @@
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
+#include <linux/acpi.h>
#include <linux/pci.h>
#include "radeon_acpi.h"
@@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
acpi_handle dhandle, atpx_handle;
acpi_status status;
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
@@ -493,7 +492,7 @@ static int radeon_atpx_init(void)
*/
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c155d6f..b3633d9 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 26ca223..0b36616 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+ radeon_semaphore_sync_to(p->ib.semaphore,
+ p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -359,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
struct radeon_bo *bo;
int r;
- r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
if (r) {
return r;
}
list_for_each_entry(lobj, &parser->validated, tv.head) {
bo = lobj->bo;
- r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
if (r) {
return r;
}
@@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_ib_sync_to(&parser->ib, vm->fence);
- radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
- rdev, vm, parser->ring));
+ radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+ radeon_semaphore_sync_to(parser->ib.semaphore,
+ radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
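With radeon_ib_sync_to() removed, callers record their dependencies directly on the IB's semaphore and let radeon_ib_schedule() perform the cross-ring sync. A minimal sketch of that pattern, using only calls visible in this series (error handling trimmed):

	/* record what this IB must wait for before it runs */
	radeon_semaphore_sync_to(ib.semaphore, bo->tbo.sync_obj);  /* buffer dependency */
	radeon_semaphore_sync_to(ib.semaphore, vm->fence);         /* page-table fence */

	/* radeon_ib_schedule() calls radeon_semaphore_sync_rings() for us
	 * (see the radeon_ring.c hunk further down) */
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		radeon_ib_free(rdev, &ib);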
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b9234c4..39b033b4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
int radeon_doorbell_init(struct radeon_device *rdev)
{
- int i;
-
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
- /* limit to 4 MB for now */
- if (rdev->doorbell.size > (4 * 1024 * 1024))
- rdev->doorbell.size = 4 * 1024 * 1024;
+ rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+ if (rdev->doorbell.num_doorbells == 0)
+ return -EINVAL;
- rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
+ rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
- rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
+ memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- rdev->doorbell.free[i] = true;
- }
return 0;
}
@@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
}
/**
- * radeon_doorbell_get - Allocate a doorbell page
+ * radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Allocate a doorbell page for use by the driver (all asics).
+ * Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
- int i;
-
- for (i = 0; i < rdev->doorbell.num_pages; i++) {
- if (rdev->doorbell.free[i]) {
- rdev->doorbell.free[i] = false;
- *doorbell = i;
- return 0;
- }
+ unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+ if (offset < rdev->doorbell.num_doorbells) {
+ __set_bit(offset, rdev->doorbell.used);
+ *doorbell = offset;
+ return 0;
+ } else {
+ return -EINVAL;
}
- return -EINVAL;
}
/**
- * radeon_doorbell_free - Free a doorbell page
+ * radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
*
- * Free a doorbell page allocated for use by the driver (all asics)
+ * Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
- if (doorbell < rdev->doorbell.num_pages)
- rdev->doorbell.free[doorbell] = true;
+ if (doorbell < rdev->doorbell.num_doorbells)
+ __clear_bit(doorbell, rdev->doorbell.used);
}
/*
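Doorbells are now handed out as 32-bit slot indices from a bitmap rather than whole pages. A small sketch of the resulting get/write/free pattern, matching the cik.c ring setup shown earlier (example_doorbell_use is an illustrative name, not a driver function):

static int example_doorbell_use(struct radeon_device *rdev,
				struct radeon_ring *ring)
{
	int r;

	/* reserves the first free bit in rdev->doorbell.used */
	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
	if (r)
		return r;

	/* bounds-checked 32-bit write at ptr[index], no byte offsets involved */
	WDOORBELL32(ring->doorbell_index, ring->wptr);

	/* clears the bit again when the ring is torn down */
	radeon_doorbell_free(rdev, ring->doorbell_index);
	return 0;
}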
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1aee322..9f5ff28 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -76,9 +76,10 @@
* 2.32.0 - new info request for rings working
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 34
+#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 543dcfa..00e0d44 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -108,9 +108,10 @@
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
* 1.33- Add r6xx/r7xx const buffer support
+ * 1.34- fix evergreen/cayman GS register
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 33
+#define DRIVER_MINOR 34
#define DRIVER_PATCHLEVEL 0
long radeon_drm_ioctl(struct file *filp,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 281d14c..d3a86e4 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -472,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
}
/**
+ * radeon_fence_wait_locked - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ *
+ * Wait for the requested fence to signal (all asics).
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_locked(struct radeon_fence *fence)
+{
+ uint64_t seq[RADEON_NUM_RINGS] = {};
+ int r;
+
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return -EINVAL;
+ }
+
+ seq[fence->ring] = fence->seq;
+ if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+ return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
+ if (r)
+ return r;
+
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 8a83b89..96e4400 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -29,6 +29,7 @@
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_trace.h"
/*
* GART
@@ -651,7 +652,7 @@ retry:
radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
0, pd_entries, 0, 0);
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm->id = choices[i];
+ trace_radeon_vm_grab_id(vm->id, ring);
return rdev->vm_manager.active[choices[i]];
}
}
@@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
/**
- * radeon_vm_bo_update_pte - map a bo into the vm page table
+ * radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
* @vm: requested vm
@@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
*
* Object has to be reserved & global and local mutex must be locked!
*/
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
- struct ttm_mem_reg *mem)
+int radeon_vm_bo_update(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
{
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
@@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
+ trace_radeon_vm_bo_update(bo_va);
+
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
@@ -1209,6 +1213,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -ENOMEM;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
ib.length_dw = 0;
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1220,7 +1226,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, radeon_vm_page_flags(bo_va->flags));
- radeon_ib_sync_to(&ib, vm->fence);
+ radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -1255,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) {
- r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+ r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
}
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bb87105..55d0b47 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_BONAIRE)
- return -EINVAL;
+ *value = rdev->config.cik.backend_map;
else if (rdev->family >= CHIP_TAHITI)
*value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
@@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+ if (rdev->family >= CHIP_BONAIRE) {
+ value = rdev->config.cik.macrotile_mode_array;
+ value_size = sizeof(uint32_t)*16;
+ } else {
+ DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+ return -EINVAL;
+ }
+ break;
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 0c7b8c6..0b158f9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+retry:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
&base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
+
+ /* On old GPUs like the RN50 with little vram, pinning can fail because
+ * the current fb is taking all the space needed. So instead of unpinning
+ * the old buffer after pinning the new one, first unpin the old one
+ * and then retry pinning the new one.
+ *
+ * As only the master can set a mode, only the master can pin, and it is
+ * unlikely the master client will race with itself, especially
+ * on those old GPUs with a single crtc.
+ *
+ * We don't shut down the display controller because the new buffer
+ * will end up in the same spot.
+ */
+ if (!atomic && fb && fb != crtc->fb) {
+ struct radeon_bo *old_rbo;
+ unsigned long nsize, osize;
+
+ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+ osize = radeon_bo_size(old_rbo);
+ nsize = radeon_bo_size(rbo);
+ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+ radeon_bo_unpin(old_rbo);
+ radeon_bo_unreserve(old_rbo);
+ fb = NULL;
+ goto retry;
+ }
+ }
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 866ace0..984097b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int temp;
if (rdev->asic->pm.get_temperature)
@@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
@@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
-static ssize_t radeon_hwmon_show_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "radeon\n");
-}
-
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
NULL
};
@@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
/* Skip limit attributes if DPM is not enabled */
if (rdev->pm.pm_method != PM_METHOD_DPM &&
@@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = {
.is_visible = hwmon_attributes_visible,
};
+static const struct attribute_group *hwmon_groups[] = {
+ &hwmon_attrgroup,
+ NULL
+};
+
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
-
- rdev->pm.int_hwmon_dev = NULL;
+ struct device *hwmon_dev;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
@@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
- rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
- if (IS_ERR(rdev->pm.int_hwmon_dev)) {
- err = PTR_ERR(rdev->pm.int_hwmon_dev);
+ hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+ "radeon", rdev,
+ hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err);
- break;
- }
- dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
- err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
- &hwmon_attrgroup);
- if (err) {
- dev_err(rdev->dev,
- "Unable to create hwmon sysfs file: %d\n", err);
- hwmon_device_unregister(rdev->dev);
}
break;
default:
@@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
return err;
}
-static void radeon_hwmon_fini(struct radeon_device *rdev)
-{
- if (rdev->pm.int_hwmon_dev) {
- sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
- hwmon_device_unregister(rdev->pm.int_hwmon_dev);
- }
-}
-
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
@@ -1252,7 +1229,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_CAYMAN:
- case CHIP_ARUBA:
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
@@ -1284,6 +1260,7 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
+ case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
@@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
-
- radeon_hwmon_fini(rdev);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
-
- radeon_hwmon_fini(rdev);
}
void radeon_pm_fini(struct radeon_device *rdev)
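The hwmon rework above switches to hwmon_device_register_with_groups(), which lets the hwmon core create the sysfs group and provide the driver name and drvdata. A minimal sketch of that API; struct my_chip, my_read_temp() and "mychip" are illustrative names only:

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

struct my_chip { struct device *dev; };

static int my_read_temp(struct my_chip *chip) { return 42000; } /* millidegrees */

static ssize_t my_temp_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct my_chip *chip = dev_get_drvdata(dev); /* drvdata passed at register time */

	return snprintf(buf, PAGE_SIZE, "%d\n", my_read_temp(chip));
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, my_temp_show, NULL, 0);

static struct attribute *my_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
static const struct attribute_group my_group = { .attrs = my_attrs };
static const struct attribute_group *my_groups[] = { &my_group, NULL };

static int my_hwmon_init(struct my_chip *chip)
{
	struct device *hwmon_dev;

	/* name, drvdata and attribute groups are owned by the hwmon core */
	hwmon_dev = hwmon_device_register_with_groups(chip->dev, "mychip",
						      chip, my_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}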
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 18254e1..9214403 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
- int i, r;
+ int r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- ib->sync_to[i] = NULL;
return 0;
}
@@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
- * radeon_ib_sync_to - sync to fence before executing the IB
- *
- * @ib: IB object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence before executing the IB
- */
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = ib->sync_to[fence->ring];
- ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
-/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- bool need_sync = false;
- int i, r = 0;
+ int r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_fence *fence = ib->sync_to[i];
- if (radeon_fence_need_sync(fence, ib->ring)) {
- need_sync = true;
- radeon_semaphore_sync_rings(rdev, ib->semaphore,
- fence->ring, ib->ring);
- radeon_fence_note_sync(fence, ib->ring);
- }
- }
- /* immediately free semaphore when we don't need to sync */
- if (!need_sync) {
- radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
}
+
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush for every IB */
if (ib->vm /*&& !ib->vm->last_flush*/) {
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 8dcc20f..2b42aa1 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -29,12 +29,12 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
-
+#include "radeon_trace.h"
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- int r;
+ int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
@@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ (*semaphore)->sync_to[i] = NULL;
+
return 0;
}
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- --semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_signale(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+ --semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
}
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
- ++semaphore->waiters;
- radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ trace_radeon_semaphore_wait(ridx, semaphore);
+
+ if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+ ++semaphore->waiters;
+
+ /* for debugging lockup only, used by sysfs debug files */
+ ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ *
+ * @semaphore: semaphore object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using this semaphore object
+ */
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = semaphore->sync_to[fence->ring];
+ semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
-/* caller must hold ring lock */
+/**
+ * radeon_semaphore_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @semaphore: semaphore object to use for sync
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- int signaler, int waiter)
+ int ring)
{
- int r;
+ int i, r;
- /* no need to signal and wait on the same ring */
- if (signaler == waiter) {
- return 0;
- }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = semaphore->sync_to[i];
- /* prevent GPU deadlocks */
- if (!rdev->ring[signaler].ready) {
- dev_err(rdev->dev, "Trying to sync to a disabled ring!");
- return -EINVAL;
- }
+ /* check if we really need to sync */
+ if (!radeon_fence_need_sync(fence, ring))
+ continue;
- r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
- if (r) {
- return r;
- }
- radeon_semaphore_emit_signal(rdev, signaler, semaphore);
- radeon_ring_commit(rdev, &rdev->ring[signaler]);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Syncing to a disabled ring!");
+ return -EINVAL;
+ }
- /* we assume caller has already allocated space on waiters ring */
- radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* allocate enough space for sync command */
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+ if (r) {
+ return r;
+ }
- /* for debugging lockup only, used by sysfs debug files */
- rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
- rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+ /* emit the signal semaphore */
+ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+ /* signaling wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ /* we assume the caller has already allocated space on the waiter's ring */
+ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+ /* waiting wasn't successful, wait manually */
+ radeon_ring_undo(&rdev->ring[i]);
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ radeon_fence_note_sync(fence, ring);
+ }
return 0;
}
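For reference, this is the shape every copy path above now takes around the new helpers. A hedged sketch only, with packet emission and ring locking elided and example_copy_sync an illustrative name; the caller is assumed to hold the ring lock, as the kernel-doc above requires:

static int example_copy_sync(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* remember the fence the copy must wait for ... */
	radeon_semaphore_sync_to(sem, *fence);

	/* ... and emit the waits (or CPU-wait fallbacks) on the target ring */
	r = radeon_semaphore_sync_rings(rdev, sem, ring->idx);
	if (r) {
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	/* ... emit the copy packets and a fresh fence here ... */

	/* release the semaphore against the new fence so its memory is
	 * only recycled once the GPU has passed it */
	radeon_semaphore_free(rdev, &sem, *fence);
	return 0;
}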
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 811bca6..0473257 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs,
__entry->fences)
);
+TRACE_EVENT(radeon_vm_grab_id,
+ TP_PROTO(unsigned vmid, int ring),
+ TP_ARGS(vmid, ring),
+ TP_STRUCT__entry(
+ __field(u32, vmid)
+ __field(u32, ring)
+ ),
+
+ TP_fast_assign(
+ __entry->vmid = vmid;
+ __entry->ring = ring;
+ ),
+ TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+);
+
+TRACE_EVENT(radeon_vm_bo_update,
+ TP_PROTO(struct radeon_bo_va *bo_va),
+ TP_ARGS(bo_va),
+ TP_STRUCT__entry(
+ __field(u64, soffset)
+ __field(u64, eoffset)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->soffset = bo_va->soffset;
+ __entry->eoffset = bo_va->eoffset;
+ __entry->flags = bo_va->flags;
+ ),
+ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+ __entry->soffset, __entry->eoffset, __entry->flags)
+);
+
TRACE_EVENT(radeon_vm_set_page,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags),
@@ -111,6 +144,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
TP_ARGS(dev, seqno)
);
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem),
+
+ TP_STRUCT__entry(
+ __field(int, ring)
+ __field(signed, waiters)
+ __field(uint64_t, gpu_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->waiters = sem->waiters;
+ __entry->gpu_addr = sem->gpu_addr;
+ ),
+
+ TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+ __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+
+ TP_PROTO(int ring, struct radeon_semaphore *sem),
+
+ TP_ARGS(ring, sem)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index a072fa8..d46b58d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -532,7 +532,7 @@ cayman 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
-0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028B90 VGT_GS_INSTANCE_CNT
0x00028BD4 PA_SC_CENTROID_PRIORITY_0
0x00028BD8 PA_SC_CENTROID_PRIORITY_1
0x00028BDC PA_SC_LINE_CNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b912a37..57745c8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
@@ -545,7 +545,7 @@ evergreen 0x9400
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
-0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028B90 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index f9b02e3..aca8cbe 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6a64cca..a36736d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3882,8 +3882,15 @@ static int si_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ tmp = RREG32(CONFIG_MEMSIZE);
+ /* some boards may have garbage in the upper 16 bits */
+ if (tmp & 0xffff0000) {
+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+ if (tmp & 0xffff)
+ tmp &= 0xffff;
+ }
+ rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
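As a rough standalone illustration (hypothetical register value, not driver code), the sanity check above behaves like this:

	u32 tmp = 0xdead0800;	/* garbage in the upper half, 2048 MB below */

	if (tmp & 0xffff0000) {		/* upper bits set -> probably bogus */
		if (tmp & 0xffff)
			tmp &= 0xffff;	/* keep the low half: 0x0800 = 2048 */
	}
	/* mc_vram_size becomes 2048ULL * 1024 * 1024 bytes */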
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 8e8f461..59be2cf 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 9364129..d700698 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
pi->enable_gfx_clock_gating = true;
- pi->enable_mg_clock_gating = true;
- pi->enable_gfx_dynamic_mgpg = true; /* ??? */
- pi->override_dynamic_mgpg = true;
+ pi->enable_mg_clock_gating = false;
+ pi->enable_gfx_dynamic_mgpg = false;
+ pi->override_dynamic_mgpg = false;
pi->enable_auto_thermal_throttling = true;
pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
pi->uvd_dpm = true; /* ??? */
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805..d4a68af 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+ return true;
}
/**
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
index 5b6fa1f..d722db2 100644
--- a/drivers/gpu/drm/radeon/uvd_v3_1.c
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -37,7 +37,7 @@
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+ return true;
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 28e1781..07eba59 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks;
struct drm_tegra_cmdbuf __user *cmdbufs =
- (void * __user)(uintptr_t)args->cmdbufs;
+ (void __user *)(uintptr_t)args->cmdbufs;
struct drm_tegra_reloc __user *relocs =
- (void * __user)(uintptr_t)args->relocs;
+ (void __user *)(uintptr_t)args->relocs;
struct drm_tegra_waitchk __user *waitchks =
- (void * __user)(uintptr_t)args->waitchks;
+ (void __user *)(uintptr_t)args->waitchks;
struct drm_tegra_syncpt syncpt;
struct host1x_job *job;
int err;
@@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo;
- err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
- if (err)
+ if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
+ err = -EFAULT;
goto fail;
+ }
bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
if (!bo) {
@@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++;
}
- err = copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs);
- if (err)
+ if (copy_from_user(job->relocarray, relocs,
+ sizeof(*relocs) * num_relocs)) {
+ err = -EFAULT;
goto fail;
+ }
while (num_relocs--) {
struct host1x_reloc *reloc = &job->relocarray[num_relocs];
@@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
}
- err = copy_from_user(job->waitchk, waitchks,
- sizeof(*waitchks) * num_waitchks);
- if (err)
+ if (copy_from_user(job->waitchk, waitchks,
+ sizeof(*waitchks) * num_waitchks)) {
+ err = -EFAULT;
goto fail;
+ }
- err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
- sizeof(syncpt));
- if (err)
+ if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
+ sizeof(syncpt))) {
+ err = -EFAULT;
goto fail;
+ }
job->is_addr_reg = context->client->ops->is_addr_reg;
job->syncpt_incrs = syncpt.incrs;
@@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor)
}
#endif
-struct drm_driver tegra_drm_driver = {
+static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
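The copy_from_user() conversions in the tegra_drm_submit() hunks above follow the standard kernel idiom: the function returns the number of bytes it failed to copy, not an errno, so any non-zero return has to be mapped to -EFAULT before taking the error path. A minimal sketch of the pattern (struct and pointer names are placeholders):

	struct foo_args args;

	if (copy_from_user(&args, user_ptr, sizeof(args)))
		return -EFAULT;	/* non-zero means "bytes left uncopied" */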
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fdfe259..7da0b92 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client)
static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
{
- return container_of(crtc, struct tegra_dc, base);
+ return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
}
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 490f771..a3835e7 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info->var.yoffset * fb->pitches[0];
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
- info->screen_base = bo->vaddr + offset;
+ info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->paddr + offset);
info->fix.smem_len = size;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index ba47ca4..3b29018 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -14,6 +14,8 @@
struct tegra_rgb {
struct tegra_output output;
+ struct tegra_dc *dc;
+
struct clk *clk_parent;
struct clk *clk;
};
@@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
static int tegra_output_rgb_enable(struct tegra_output *output)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_rgb *rgb = to_rgb(output);
- tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+ tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
return 0;
}
static int tegra_output_rgb_disable(struct tegra_output *output)
{
- struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct tegra_rgb *rgb = to_rgb(output);
- tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
return 0;
}
@@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->output.dev = dc->dev;
rgb->output.of_node = np;
+ rgb->dc = dc;
err = tegra_output_probe(&rgb->output);
if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646..07e02c4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
-
+ mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
+ mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ /*
+ * In the absence of a wait_unlocked API,
+ * use the bo::wu_mutex to avoid triggering livelocks due to
+ * concurrent use of this function. Note that this use of
+ * bo::wu_mutex can go away if we change locking order to
+ * mmap_sem -> bo::reserve.
+ */
+ ret = mutex_lock_interruptible(&bo->wu_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+ if (!ww_mutex_is_locked(&bo->resv->lock))
+ goto out_unlock;
+ ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+ mutex_unlock(&bo->wu_mutex);
+ return ret;
+}
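A sketch of the fault-path usage this helper is written for (the ttm_bo_vm.c hunk below does exactly this): on a failed trylock-style reserve, drop mmap_sem, block until the buffer is unreserved, and ask the core to retry the fault so the reversed lock order can never deadlock:

	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			up_read(&vma->vm_mm->mmap_sem);
			(void) ttm_bo_wait_unreserved(bo);
		}
		return VM_FAULT_RETRY;
	}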
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4834c46..15b86a9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out2;
/*
- * Move nonexistent data. NOP.
+ * Don't move nonexistent data. Clear destination instead.
*/
- if (old_iomap == NULL && ttm == NULL)
+ if (old_iomap == NULL &&
+ (ttm == NULL || ttm->state == tt_unpopulated)) {
+ memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
+ }
/*
* TTM might be null for moves within the same region.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3..b249ab9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after scheduling.
+ * for reserve, and if it fails, retry the fault after waiting
+ * for the buffer to become unreserved.
*/
-
- ret = ttm_bo_reserve(bo, true, true, false, 0);
+ ret = ttm_bo_reserve(bo, true, true, false, NULL);
if (unlikely(ret != 0)) {
- if (ret == -EBUSY)
- set_need_resched();
+ if (ret != -EBUSY)
+ return VM_FAULT_NOPAGE;
+
+ if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ up_read(&vma->vm_mm->mmap_sem);
+ (void) ttm_bo_wait_unreserved(bo);
+ }
+
+ return VM_FAULT_RETRY;
+ }
+
+ /*
+ * If we'd want to change locking order to
+ * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+ * instead of retrying the fault...
+ */
return VM_FAULT_NOPAGE;
}
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
case 0:
break;
case -EBUSY:
- set_need_resched();
case -ERESTARTSYS:
retval = VM_FAULT_NOPAGE;
goto out_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c91178..479e941 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,8 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
- struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
- ww_acquire_fini(ticket);
+ ttm_eu_backoff_reservation_locked(list);
+ if (ticket)
+ ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- ww_acquire_init(ticket, &reservation_ww_class);
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
if (entry->reserved)
continue;
-
- ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+ ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+ ticket);
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
+ BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
}
}
- ww_acquire_done(ticket);
+ if (ticket)
+ ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
err:
spin_lock(&glob->lru_lock);
- ttm_eu_backoff_reservation_locked(list, ticket);
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
+ }
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
- ww_acquire_fini(ticket);
+ if (ticket)
+ ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176..6fe7b92 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,12 @@
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
*/
/** @file ttm_ref_object.c
*
@@ -34,6 +40,7 @@
* and release on file close.
*/
+
/**
* struct ttm_object_file
*
@@ -84,6 +91,9 @@ struct ttm_object_device {
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
+ struct dma_buf_ops ops;
+ void (*dmabuf_release)(struct dma_buf *dma_buf);
+ size_t dma_buf_size;
};
/**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
struct ttm_object_file *tfile;
};
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
@@ -416,9 +428,10 @@ out_err:
}
EXPORT_SYMBOL(ttm_object_file_init);
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
- *mem_glob,
- unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
+ if (ret != 0)
+ goto out_no_object_hash;
- if (likely(ret == 0))
- return tdev;
+ tdev->ops = *ops;
+ tdev->dmabuf_release = tdev->ops.release;
+ tdev->ops.release = ttm_prime_dmabuf_release;
+ tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+ ttm_round_pot(sizeof(struct file));
+ return tdev;
+out_no_object_hash:
kfree(tdev);
return NULL;
}
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+ return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_prime_object *prime;
+
+ *p_base = NULL;
+ prime = container_of(base, struct ttm_prime_object, base);
+ BUG_ON(prime->dma_buf != NULL);
+ mutex_destroy(&prime->mutex);
+ if (prime->refcount_release)
+ prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf: The struct dma_buf being released.
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct ttm_prime_object *prime =
+ (struct ttm_prime_object *) dma_buf->priv;
+ struct ttm_base_object *base = &prime->base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ if (tdev->dmabuf_release)
+ tdev->dmabuf_release(dma_buf);
+ mutex_lock(&prime->mutex);
+ if (prime->dma_buf == dma_buf)
+ prime->dma_buf = NULL;
+ mutex_unlock(&prime->mutex);
+ ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+ ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ struct ttm_base_object *base;
+ int ret;
+
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &tdev->ops)
+ return -ENOSYS;
+
+ prime = (struct ttm_prime_object *) dma_buf->priv;
+ base = &prime->base;
+ *handle = base->hash.key;
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+ dma_buf_put(dma_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct dma_buf *dma_buf;
+ struct ttm_prime_object *prime;
+ int ret;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL ||
+ base->object_type != ttm_prime_type)) {
+ ret = -ENOENT;
+ goto out_unref;
+ }
+
+ prime = container_of(base, struct ttm_prime_object, base);
+ if (unlikely(!base->shareable)) {
+ ret = -EPERM;
+ goto out_unref;
+ }
+
+ ret = mutex_lock_interruptible(&prime->mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_unref;
+ }
+
+ dma_buf = prime->dma_buf;
+ if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+ /*
+ * Need to create a new dma_buf, with memory accounting.
+ */
+ ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ dma_buf = dma_buf_export(prime, &tdev->ops,
+ prime->size, flags);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ttm_mem_global_free(tdev->mem_glob,
+ tdev->dma_buf_size);
+ mutex_unlock(&prime->mutex);
+ goto out_unref;
+ }
+
+ /*
+ * dma_buf has taken the base object reference
+ */
+ base = NULL;
+ prime->dma_buf = dma_buf;
+ }
+ mutex_unlock(&prime->mutex);
+
+ ret = dma_buf_fd(dma_buf, flags);
+ if (ret >= 0) {
+ *prime_fd = ret;
+ ret = 0;
+ } else
+ dma_buf_put(dma_buf);
+
+out_unref:
+ if (base)
+ ttm_base_object_unref(&base);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+ struct ttm_prime_object *prime, bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ mutex_init(&prime->mutex);
+ prime->size = PAGE_ALIGN(size);
+ prime->real_type = type;
+ prime->dma_buf = NULL;
+ prime->refcount_release = refcount_release;
+ return ttm_base_object_init(tfile, &prime->base, shareable,
+ ttm_prime_type,
+ ttm_prime_refcount_release,
+ ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
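A hedged sketch of the driver-side usage (vmwgfx converts its user dma buffers and surfaces to this scheme further down); the struct and release-function names here are illustrative only:

	struct my_user_buffer {
		struct ttm_prime_object prime;	/* embedded prime object */
		/* driver specific members ... */
	};

	/* in the driver's alloc path, assuming tfile/size/shareable exist */
	ret = ttm_prime_object_init(tfile, size, &buf->prime, shareable,
				    ttm_buffer_type, &my_release, NULL);
	if (ret == 0)
		*handle = buf->prime.base.hash.key;	/* handed to userspace */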
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 24ffbe9..8d67b94 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return;
+ }
+
drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd9..9f8b690 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o
+ vmwgfx_surface.o vmwgfx_prime.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f..0489c61 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
bool mapped;
};
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
/**
* Helper functions to advance a struct vmw_piter iterator.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 20d5485..c7a5496 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->tdev = ttm_object_device_init
- (dev_priv->mem_global_ref.object, 12);
+ (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
@@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
.dumb_map_offset = vmw_dumb_map_offset,
.dumb_destroy = vmw_dumb_destroy,
+ .prime_fd_to_handle = vmw_prime_fd_to_handle,
+ .prime_handle_to_fd = vmw_prime_handle_to_fd,
+
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5d..20890ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
* TTM buffer object driver - vmwgfx_buffer.c
*/
+extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
@@ -819,6 +820,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+
+/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d86..03f1c20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
vmw_surface_unreference(&du->cursor_surface);
if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf);
+ drm_sysfs_connector_remove(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e..a055a26 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
+ crtc->enabled = false;
vmw_ldu_del_active(dev_priv, ldu);
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
crtc->x = set->x;
crtc->y = set->y;
crtc->mode = *mode;
+ crtc->enabled = true;
vmw_ldu_add_active(dev_priv, ldu, vfb);
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
+ (void) drm_sysfs_connector_add(connector);
+
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 0000000..31fe32d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+{
+ return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgb,
+ enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+ return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops = {
+ .attach = vmw_prime_map_attach,
+ .detach = vmw_prime_map_detach,
+ .map_dma_buf = vmw_prime_map_dma_buf,
+ .unmap_dma_buf = vmw_prime_unmap_dma_buf,
+ .release = NULL,
+ .kmap = vmw_prime_dmabuf_kmap,
+ .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+ .kunmap = vmw_prime_dmabuf_kunmap,
+ .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+ .mmap = vmw_prime_dmabuf_mmap,
+ .vmap = vmw_prime_dmabuf_vmap,
+ .vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv,
+ int fd, u32 *handle)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 252501a..9b5ea2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
@@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != converter->object_type))
+ if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
@@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
/**
* Buffer management.
*/
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
+{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+ }
+
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
+}
+
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
kfree(vmw_bo);
}
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+ ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
@@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
+ bool user = (bo_free == &vmw_user_dmabuf_destroy);
- BUG_ON(!bo_free);
+ BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+ acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
+ (user) ? ttm_bo_type_device :
+ ttm_bo_type_kernel, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
- ttm_base_object_kfree(vmw_user_bo, base);
-}
-
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
@@ -401,7 +435,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
if (unlikely(base == NULL))
return;
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
@@ -442,18 +477,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
return ret;
tmp = ttm_bo_reference(&user_bo->dma.base);
- ret = ttm_base_object_init(tfile,
- &user_bo->base,
- shareable,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_dma_buf = &user_bo->dma;
- *handle = user_bo->base.hash.key;
+ *handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
@@ -475,8 +511,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
- return (vmw_user_bo->base.tfile == tfile ||
- vmw_user_bo->base.shareable) ? 0 : -EPERM;
+ return (vmw_user_bo->prime.base.tfile == tfile ||
+ vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +574,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return -ESRCH;
}
- if (unlikely(base->object_type != ttm_buffer_type)) {
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
@@ -562,7 +599,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
- return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL);
}
/*
@@ -777,53 +815,55 @@ err_ref:
}
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
int ret;
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (vmw_user_bo == NULL)
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (ret != 0) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
- if (ret != 0)
- goto out_no_dmabuf;
-
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &dma_buf);
if (unlikely(ret != 0))
- goto out_no_base_object;
-
- args->handle = vmw_user_bo->base.hash.key;
+ goto out_no_dmabuf;
-out_no_base_object:
- ttm_bo_unref(&tmp);
+ vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
return ret;
}
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
@@ -841,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
return 0;
}
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
@@ -994,7 +1043,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
*/
static int
vmw_resource_check_buffer(struct vmw_resource *res,
- struct ww_acquire_ctx *ticket,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@@ -1011,7 +1059,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -1029,7 +1077,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
@@ -1074,8 +1122,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
* @val_buf: Backup buffer information.
*/
static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@@ -1084,7 +1131,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
- ttm_eu_backoff_reservation(ticket, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@@ -1099,14 +1146,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
- struct ww_acquire_ctx ticket;
int ret;
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- ret = vmw_resource_check_buffer(res, &ticket, interruptible,
- &val_buf);
+ ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1121,7 +1166,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
- vmw_resource_backoff_reservation(&ticket, &val_buf);
+ vmw_resource_backoff_reservation(&val_buf);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3..22406c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
+ crtc->enabled = false;
vmw_sou_del_active(dev_priv, sou);
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
+ crtc->enabled = false;
return ret;
}
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
+ crtc->enabled = true;
return 0;
}
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
+ (void) drm_sysfs_connector_add(connector);
+
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5828143..7de2ea8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
* @size: TTM accounting size for the surface.
*/
struct vmw_user_surface {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
- return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+ return &(container_of(base, struct vmw_user_surface,
+ prime.base)->srf.res);
}
/**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
+ container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
/**
* From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
+ ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->sid = user_srf->base.hash.key;
+ rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
out_no_offsets:
kfree(srf->sizes);
out_no_sizes:
- ttm_base_object_kfree(user_srf, base);
+ ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
+ user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+ TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;