author     Ingo Molnar <mingo@elte.hu>  2010-06-18 10:53:12 +0200
committer  Ingo Molnar <mingo@elte.hu>  2010-06-18 10:53:19 +0200
commit     646b1db4956ba8bf748b835b5eba211133d91c2e (patch)
tree       061166d873d9da9cf83044a7593ad111787076c5  /drivers/gpu
parent     0f2c3de2ba110626515234d5d584fb1b0c0749a2 (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge commit 'v2.6.35-rc3' into perf/core
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 28
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 82
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 734
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 210
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 897
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 182
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 82
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 304
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 849
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 124
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_cursor.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1356
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 464
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 105
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 76
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 170
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 611
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 64
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 87
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 173
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 203
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 189
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 61
74 files changed, 6364 insertions(+), 1477 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 994d23b..57cea01 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1840,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
goto out_err2;
+ }
}
if (fb->funcs->dirty) {
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7644019..9b2a541 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
}
}
-void drm_kms_helper_poll_init(struct drm_device *dev)
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- struct drm_connector *connector;
bool poll = false;
+ struct drm_connector *connector;
int ret;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
- slow_work_register_user(THIS_MODULE);
- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
- &output_poll_ops);
if (poll) {
ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+ &output_poll_ops);
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ drm_kms_helper_poll_disable(dev);
slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b3779d2..08c4c92 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -264,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
- DRM_ERROR("panic occurred, switching back to text console\n");
+ printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
return 0;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9563901..da78f2c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
+ intel_ringbuffer.o \
intel_overlay.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c..52510ad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
lock = &dev_priv->mm.active_list_lock;
- head = &dev_priv->mm.active_list;
+ head = &dev_priv->render_ring.active_list;
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_i915_gem_request *gem_request;
seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+ list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+ list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->hw_status_page != NULL) {
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ i915_get_gem_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->hw_status_page != NULL) {
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ i915_get_gem_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
int i;
volatile u32 *hws;
- hws = (volatile u32 *)dev_priv->hw_status_page;
+ hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
if (hws == NULL)
return 0;
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+ list) {
obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
@@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
u8 *virt;
uint32_t *ptr, off;
- if (!dev_priv->ring.ring_obj) {
+ if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n");
return 0;
}
- virt = dev_priv->ring.virtual_start;
+ virt = dev_priv->render_ring.virtual_start;
- for (off = 0; off < dev_priv->ring.Size; off += 4) {
+ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -344,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+ seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
return 0;
@@ -489,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
- seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
- seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
- rgvswctl & 0x3f);
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
return 0;
}
@@ -508,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+ (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
return 0;
@@ -541,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+ u16 crstandvid = I915_READ16(CRSTANDVID);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
@@ -555,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
seq_printf(m, "Starting frequency: P%d\n",
(rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max frequency: P%d\n",
+ seq_printf(m, "Max P-state: P%d\n",
(rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
return 0;
}
@@ -621,6 +633,36 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long temp, chipset, gfx;
+
+ temp = i915_mch_val(dev_priv);
+ chipset = i915_chipset_val(dev_priv);
+ gfx = i915_gfx_val(dev_priv);
+
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+ return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -743,6 +785,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_delayfreq_table", i915_delayfreq_table, 0},
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_emon_status", i915_emon_status, 0},
+ {"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de..59a2bf8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,84 +40,6 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
- u32 last_acthd = I915_READ(acthd_reg);
- u32 acthd;
- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- int i;
-
- trace_i915_ring_wait_begin (dev);
-
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
- return 0;
- }
-
- if (dev->primary->master) {
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- }
-
-
- if (ring->head != last_head)
- i = 0;
- if (acthd != last_acthd)
- i = 0;
-
- last_head = ring->head;
- last_acthd = acthd;
- msleep_interruptible(10);
-
- }
-
- trace_i915_ring_wait_end (dev);
- return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- volatile unsigned int *virt;
- int rem;
-
- rem = dev_priv->ring.Size - dev_priv->ring.tail;
- if (dev_priv->ring.space < rem) {
- int ret = i915_wait_ring(dev, rem, __func__);
- if (ret)
- return ret;
- }
- dev_priv->ring.space -= rem;
-
- virt = (unsigned int *)
- (dev_priv->ring.virtual_start + dev_priv->ring.tail);
- rem /= 4;
- while (rem--)
- *virt++ = MI_NOOP;
-
- dev_priv->ring.tail = 0;
-
- return 0;
-}
-
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
if (IS_I965G(dev))
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev)
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
+ if (dev_priv->render_ring.status_page.gfx_addr) {
+ dev_priv->render_ring.status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
/*
* We should never lose context on the ring with modesetting
@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
- ring->space += ring->Size;
+ ring->space += ring->size;
if (!dev->primary->master)
return;
@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- dev_priv->ring.map.handle = NULL;
- dev_priv->ring.map.size = 0;
- }
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->ring.ring_obj != NULL) {
+ if (dev_priv->render_ring.gem_object != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->ring.Size = init->ring_size;
+ dev_priv->render_ring.size = init->ring_size;
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
+ dev_priv->render_ring.map.offset = init->ring_start;
+ dev_priv->render_ring.map.size = init->ring_size;
+ dev_priv->render_ring.map.type = 0;
+ dev_priv->render_ring.map.flags = 0;
+ dev_priv->render_ring.map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+ drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
+ if (dev_priv->render_ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (dev_priv->ring.map.handle == NULL) {
+ ring = &dev_priv->render_ring;
+
+ if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
/* Program Hardware Status Page */
- if (!dev_priv->hw_status_page) {
+ if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
- dev_priv->hw_status_page);
-
- if (dev_priv->status_gfx_addr != 0)
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+ ring->setup_status_page(dev, ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- RING_LOCALS;
- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+ if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
return -EINVAL;
BEGIN_LP_RING((dwords+1)&~1);
@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box = boxes[i];
- RING_LOCALS;
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
int i = 0, count;
- RING_LOCALS;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
- RING_LOCALS;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+ return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+ dev_priv->render_ring.size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
/* depends on GEM */
value = dev_priv->has_gem;
break;
+ case I915_PARAM_HAS_BSD:
+ value = HAS_BSD(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap_wc(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
i915_dma_cleanup(dev);
- dev_priv->status_gfx_addr = 0;
+ ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ ring->status_page.page_addr = dev_priv->hws_map.handle;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
+ ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
- dev_priv->hw_status_page);
+ ring->status_page.page_addr);
return 0;
}
@@ -1399,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
- printk(KERN_INFO "i915: switched off\n");
+ printk(KERN_INFO "i915: switched on\n");
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
+ drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "i915: switched off\n");
+ drm_kms_helper_poll_disable(dev);
i915_suspend(dev, pmm);
}
}
@@ -1479,19 +1402,19 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_ringbuffer;
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
i915_switcheroo_can_switch);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_vga_client;
intel_modeset_init(dev);
ret = drm_irq_install(dev);
if (ret)
- goto destroy_ringbuffer;
+ goto cleanup_vga_switcheroo;
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1503,11 +1426,20 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- intel_fbdev_init(dev);
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
drm_kms_helper_poll_init(dev);
return 0;
-destroy_ringbuffer:
+cleanup_irq:
+ drm_irq_uninstall(dev);
+cleanup_vga_switcheroo:
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
@@ -1539,14 +1471,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
- if (!IS_PINEVIEW(dev))
- return;
-
tmp = I915_READ(CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
@@ -1575,7 +1504,524 @@ static void i915_get_mem_freq(struct drm_device *dev)
dev_priv->mem_freq = 800;
break;
}
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+struct v_table {
+ u8 vid;
+ unsigned long vd; /* in .1 mil */
+ unsigned long vm; /* in .1 mil */
+ u8 pvid;
+};
+
+static struct v_table v_table[] = {
+ { 0, 16125, 15000, 0x7f, },
+ { 1, 16000, 14875, 0x7e, },
+ { 2, 15875, 14750, 0x7d, },
+ { 3, 15750, 14625, 0x7c, },
+ { 4, 15625, 14500, 0x7b, },
+ { 5, 15500, 14375, 0x7a, },
+ { 6, 15375, 14250, 0x79, },
+ { 7, 15250, 14125, 0x78, },
+ { 8, 15125, 14000, 0x77, },
+ { 9, 15000, 13875, 0x76, },
+ { 10, 14875, 13750, 0x75, },
+ { 11, 14750, 13625, 0x74, },
+ { 12, 14625, 13500, 0x73, },
+ { 13, 14500, 13375, 0x72, },
+ { 14, 14375, 13250, 0x71, },
+ { 15, 14250, 13125, 0x70, },
+ { 16, 14125, 13000, 0x6f, },
+ { 17, 14000, 12875, 0x6e, },
+ { 18, 13875, 12750, 0x6d, },
+ { 19, 13750, 12625, 0x6c, },
+ { 20, 13625, 12500, 0x6b, },
+ { 21, 13500, 12375, 0x6a, },
+ { 22, 13375, 12250, 0x69, },
+ { 23, 13250, 12125, 0x68, },
+ { 24, 13125, 12000, 0x67, },
+ { 25, 13000, 11875, 0x66, },
+ { 26, 12875, 11750, 0x65, },
+ { 27, 12750, 11625, 0x64, },
+ { 28, 12625, 11500, 0x63, },
+ { 29, 12500, 11375, 0x62, },
+ { 30, 12375, 11250, 0x61, },
+ { 31, 12250, 11125, 0x60, },
+ { 32, 12125, 11000, 0x5f, },
+ { 33, 12000, 10875, 0x5e, },
+ { 34, 11875, 10750, 0x5d, },
+ { 35, 11750, 10625, 0x5c, },
+ { 36, 11625, 10500, 0x5b, },
+ { 37, 11500, 10375, 0x5a, },
+ { 38, 11375, 10250, 0x59, },
+ { 39, 11250, 10125, 0x58, },
+ { 40, 11125, 10000, 0x57, },
+ { 41, 11000, 9875, 0x56, },
+ { 42, 10875, 9750, 0x55, },
+ { 43, 10750, 9625, 0x54, },
+ { 44, 10625, 9500, 0x53, },
+ { 45, 10500, 9375, 0x52, },
+ { 46, 10375, 9250, 0x51, },
+ { 47, 10250, 9125, 0x50, },
+ { 48, 10125, 9000, 0x4f, },
+ { 49, 10000, 8875, 0x4e, },
+ { 50, 9875, 8750, 0x4d, },
+ { 51, 9750, 8625, 0x4c, },
+ { 52, 9625, 8500, 0x4b, },
+ { 53, 9500, 8375, 0x4a, },
+ { 54, 9375, 8250, 0x49, },
+ { 55, 9250, 8125, 0x48, },
+ { 56, 9125, 8000, 0x47, },
+ { 57, 9000, 7875, 0x46, },
+ { 58, 8875, 7750, 0x45, },
+ { 59, 8750, 7625, 0x44, },
+ { 60, 8625, 7500, 0x43, },
+ { 61, 8500, 7375, 0x42, },
+ { 62, 8375, 7250, 0x41, },
+ { 63, 8250, 7125, 0x40, },
+ { 64, 8125, 7000, 0x3f, },
+ { 65, 8000, 6875, 0x3e, },
+ { 66, 7875, 6750, 0x3d, },
+ { 67, 7750, 6625, 0x3c, },
+ { 68, 7625, 6500, 0x3b, },
+ { 69, 7500, 6375, 0x3a, },
+ { 70, 7375, 6250, 0x39, },
+ { 71, 7250, 6125, 0x38, },
+ { 72, 7125, 6000, 0x37, },
+ { 73, 7000, 5875, 0x36, },
+ { 74, 6875, 5750, 0x35, },
+ { 75, 6750, 5625, 0x34, },
+ { 76, 6625, 5500, 0x33, },
+ { 77, 6500, 5375, 0x32, },
+ { 78, 6375, 5250, 0x31, },
+ { 79, 6250, 5125, 0x30, },
+ { 80, 6125, 5000, 0x2f, },
+ { 81, 6000, 4875, 0x2e, },
+ { 82, 5875, 4750, 0x2d, },
+ { 83, 5750, 4625, 0x2c, },
+ { 84, 5625, 4500, 0x2b, },
+ { 85, 5500, 4375, 0x2a, },
+ { 86, 5375, 4250, 0x29, },
+ { 87, 5250, 4125, 0x28, },
+ { 88, 5125, 4000, 0x27, },
+ { 89, 5000, 3875, 0x26, },
+ { 90, 4875, 3750, 0x25, },
+ { 91, 4750, 3625, 0x24, },
+ { 92, 4625, 3500, 0x23, },
+ { 93, 4500, 3375, 0x22, },
+ { 94, 4375, 3250, 0x21, },
+ { 95, 4250, 3125, 0x20, },
+ { 96, 4125, 3000, 0x1f, },
+ { 97, 4125, 3000, 0x1e, },
+ { 98, 4125, 3000, 0x1d, },
+ { 99, 4125, 3000, 0x1c, },
+ { 100, 4125, 3000, 0x1b, },
+ { 101, 4125, 3000, 0x1a, },
+ { 102, 4125, 3000, 0x19, },
+ { 103, 4125, 3000, 0x18, },
+ { 104, 4125, 3000, 0x17, },
+ { 105, 4125, 3000, 0x16, },
+ { 106, 4125, 3000, 0x15, },
+ { 107, 4125, 3000, 0x14, },
+ { 108, 4125, 3000, 0x13, },
+ { 109, 4125, 3000, 0x12, },
+ { 110, 4125, 3000, 0x11, },
+ { 111, 4125, 3000, 0x10, },
+ { 112, 4125, 3000, 0x0f, },
+ { 113, 4125, 3000, 0x0e, },
+ { 114, 4125, 3000, 0x0d, },
+ { 115, 4125, 3000, 0x0c, },
+ { 116, 4125, 3000, 0x0b, },
+ { 117, 4125, 3000, 0x0a, },
+ { 118, 4125, 3000, 0x09, },
+ { 119, 4125, 3000, 0x08, },
+ { 120, 1125, 0, 0x07, },
+ { 121, 1000, 0, 0x06, },
+ { 122, 875, 0, 0x05, },
+ { 123, 750, 0, 0x04, },
+ { 124, 625, 0, 0x03, },
+ { 125, 500, 0, 0x02, },
+ { 126, 375, 0, 0x01, },
+ { 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+ int i;
+ int t;
+ int m;
+ int c;
+};
+
+static struct cparams cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ return ret;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ unsigned long val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+ if (v_table[i].pvid == pxvid) {
+ if (IS_MOBILE(dev_priv->dev))
+ val = v_table[i].vm;
+ else
+ val = v_table[i].vd;
+ }
+ }
+
+ return val;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
* i915_driver_load - setup chip and create an initial config
@@ -1594,7 +2040,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
-
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1672,6 +2117,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->has_gem = 0;
}
+ if (dev_priv->has_gem == 0 &&
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
@@ -1691,7 +2143,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_workqueue_free;
}
- i915_get_mem_freq(dev);
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_IRONLAKE(dev))
+ i915_ironlake_get_mem_freq(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1709,7 +2164,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->user_irq_lock);
spin_lock_init(&dev_priv->error_lock);
- dev_priv->user_irq_refcount = 0;
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1738,6 +2192,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
+
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
return 0;
out_workqueue_free:
@@ -1759,6 +2219,10 @@ int i915_driver_unload(struct drm_device *dev)
i915_destroy_error_state(dev);
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+
destroy_workqueue(dev_priv->wq);
del_timer_sync(&dev_priv->hangcheck_timer);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c51e45..423dc90 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,95 +60,95 @@ extern int intel_agp_enabled;
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
.is_i8xx = 1,
};
-const static struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info = {
.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
.is_i8xx = 1,
};
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info = {
.is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info = {
.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
};
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
.need_gfx_hws = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
.need_gfx_hws = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
-const static struct intel_device_info intel_sandybridge_d_info = {
+static const struct intel_device_info intel_sandybridge_d_info = {
.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1, .is_gen6 = 1,
};
-const static struct intel_device_info intel_sandybridge_m_info = {
+static const struct intel_device_info intel_sandybridge_m_info = {
.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
.has_hotplug = 1, .is_gen6 = 1,
};
-const static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
/*
* Clear request list
*/
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
if (need_display)
i915_save_display(dev);
@@ -370,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
}
} else {
DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ mutex_unlock(&dev->struct_mutex);
return -ENODEV;
}
@@ -388,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
- struct drm_gem_object *obj = ring->ring_obj;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ !dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- }
-
+ ring->init(dev, ring);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef..2765831 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
#include "i915_reg.h"
#include "intel_bios.h"
+#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
/* General customization:
@@ -55,6 +56,8 @@ enum plane {
#define I915_NUM_PIPE 2
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
/* Interface history:
*
* 1.1: Original.
@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object {
struct drm_gem_object *cur_obj;
};
-typedef struct _drm_i915_ring_buffer {
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
- struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
@@ -241,17 +234,15 @@ typedef struct drm_i915_private {
void __iomem *regs;
struct pci_dev *bridge_dev;
- drm_i915_ring_buffer_t ring;
+ struct intel_ring_buffer render_ring;
+ struct intel_ring_buffer bsd_ring;
drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int status_gfx_addr;
unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *hws_obj;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
@@ -267,8 +258,6 @@ typedef struct drm_i915_private {
atomic_t irq_received;
/** Protects user_irq_refcount and irq_mask_reg */
spinlock_t user_irq_lock;
- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
- int user_irq_refcount;
u32 trace_irq_seqno;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
@@ -289,6 +278,7 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ int num_pipe;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -334,7 +324,7 @@ typedef struct drm_i915_private {
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
- unsigned int fsb_freq, mem_freq;
+ unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
struct drm_i915_error_state *first_error;
@@ -514,18 +504,7 @@ typedef struct drm_i915_private {
*/
struct list_head shrink_list;
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
spinlock_t active_list_lock;
- struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
@@ -563,12 +542,6 @@ typedef struct drm_i915_private {
struct list_head fence_list;
/**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -644,6 +617,18 @@ typedef struct drm_i915_private {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
+ spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
@@ -671,19 +656,64 @@ struct drm_i915_gem_object {
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
*/
- int active;
+ unsigned int active : 1;
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
- int dirty;
+ unsigned int dirty : 1;
+
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ *
+ * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
+ */
+ int fence_reg : 5;
+
+ /**
+ * Used for checking the object doesn't appear more than once
+ * in an execbuffer object list.
+ */
+ unsigned int in_execbuffer : 1;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv : 2;
+
+ /**
+ * Refcount for the pages array. With the current locking scheme, there
+ * are at most two concurrent users: Binding a bo to the gtt and
+ * pwrite/pread using physical addresses. So two bits for a maximum
+ * of two users are enough.
+ */
+ unsigned int pages_refcount : 2;
+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
+
+ /**
+ * Current tiling mode for the object.
+ */
+ unsigned int tiling_mode : 2;
+
+ /** How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ int pin_count : 4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
struct page **pages;
- int pages_refcount;
/**
* Current offset of the object in GTT space.
@@ -692,26 +722,18 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
+ /* Which ring is refering to is this object */
+ struct intel_ring_buffer *ring;
+
/**
* Fake offset for use by mmap(2)
*/
uint64_t mmap_offset;
- /**
- * Fence register bits (if any) for this object. Will be set
- * as needed when mapped into the GTT.
- * Protected by dev->struct_mutex.
- */
- int fence_reg;
-
- /** How many users have pinned this object in GTT space */
- int pin_count;
-
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
- /** Current tiling mode for the object. */
- uint32_t tiling_mode;
+ /** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
/** Record of address bit 17 of each page at last unbind. */
@@ -734,17 +756,6 @@ struct drm_i915_gem_object {
struct drm_i915_gem_phys_object *phys_obj;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- int in_execbuffer;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- int madv;
-
- /**
* Number of crtcs where this object is currently the fb, but
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
@@ -765,6 +776,9 @@ struct drm_i915_gem_object {
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
+ /** On Which ring this request was generated */
+ struct intel_ring_buffer *ring;
+
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -821,6 +835,11 @@ extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
extern int i965_reset(struct drm_device *dev, u8 flags);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -829,9 +848,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -849,6 +866,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+ u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+ u32 mask);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -922,11 +944,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -937,9 +961,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno, int interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
@@ -1015,7 +1043,7 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
-
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1026,7 +1054,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+ if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+ == NULL) \
LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0)
@@ -1039,35 +1068,31 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
#define I915_READ64(reg) readq(dev_priv->regs + (reg))
#define POSTING_READ(reg) (void)I915_READ(reg)
+#define POSTING_READ16(reg) (void)I915_READ16(reg)
#define I915_VERBOSE 0
-#define RING_LOCALS volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do { \
- int bytes__ = 4*(n); \
- if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
- /* a wrap must occur between instructions so pad beforehand */ \
- if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
- i915_wrap_ring(dev); \
- if (unlikely (dev_priv->ring.space < bytes__)) \
- i915_wait_ring(dev, bytes__, __func__); \
- ring_virt__ = (unsigned int *) \
- (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
- dev_priv->ring.tail += bytes__; \
- dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
- dev_priv->ring.space -= bytes__; \
+#define BEGIN_LP_RING(n) do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
+ if (I915_VERBOSE) \
+ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
+ intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \
} while (0)
-#define OUT_RING(n) do { \
- if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *ring_virt__++ = (n); \
+
+#define OUT_RING(x) do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
+ if (I915_VERBOSE) \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
+ intel_ring_emit(dev, &dev_priv->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
- I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \
+ DRM_DEBUG("ADVANCE_LP_RING %x\n", \
+ dev_priv->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv->render_ring); \
} while(0)
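The legacy LP-ring macros above no longer poke ring.tail/space directly; they expand to intel_ring_begin(), intel_ring_emit() and intel_ring_advance() on dev_priv->render_ring. A minimal, illustrative sketch of the caller-side pattern under the reworked macros (the MI_FLUSH/MI_NOOP payload is only an example, not part of this patch):

/* Sketch only: emit two dwords through the reworked macros, which now
 * delegate to the intel_ring_buffer helpers on the render ring.
 */
static void example_emit_flush(struct drm_device *dev)
{
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
}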
/**
@@ -1085,14 +1110,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
+ (dev_priv->render_ring.status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
@@ -1138,6 +1161,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2A42 || \
(dev)->pci_device == 0x2E42)
+#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f..9ded3da 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,8 +35,6 @@
#include <linux/swap.h>
#include <linux/pci.h>
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
obj_priv->tiling_mode != I915_TILING_NONE;
}
-static inline int
+static inline void
slow_shmem_copy(struct page *dst_page,
int dst_offset,
struct page *src_page,
@@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
{
char *dst_vaddr, *src_vaddr;
- dst_vaddr = kmap_atomic(dst_page, KM_USER0);
- if (dst_vaddr == NULL)
- return -ENOMEM;
-
- src_vaddr = kmap_atomic(src_page, KM_USER1);
- if (src_vaddr == NULL) {
- kunmap_atomic(dst_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ dst_vaddr = kmap(dst_page);
+ src_vaddr = kmap(src_page);
memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
- kunmap_atomic(src_vaddr, KM_USER1);
- kunmap_atomic(dst_vaddr, KM_USER0);
-
- return 0;
+ kunmap(src_page);
+ kunmap(dst_page);
}
-static inline int
+static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
int gpu_offset,
struct page *cpu_page,
@@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
cpu_page, cpu_offset, length);
}
- gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
- if (gpu_vaddr == NULL)
- return -ENOMEM;
-
- cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
- if (cpu_vaddr == NULL) {
- kunmap_atomic(gpu_vaddr, KM_USER0);
- return -ENOMEM;
- }
+ gpu_vaddr = kmap(gpu_page);
+ cpu_vaddr = kmap(cpu_page);
/* Copy the data, XORing A6 with A17 (1). The user already knows he's
* XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
length -= this_length;
}
- kunmap_atomic(cpu_vaddr, KM_USER1);
- kunmap_atomic(gpu_vaddr, KM_USER0);
-
- return 0;
+ kunmap(cpu_page);
+ kunmap(gpu_page);
}
/**
@@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
page_length = PAGE_SIZE - data_page_offset;
if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 1);
- } else {
- ret = slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
shmem_page_offset,
- page_length);
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 1);
+ } else {
+ slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ page_length);
}
- if (ret)
- goto fail_put_pages;
remain -= page_length;
data_ptr += page_length;
@@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
* page faults
*/
-static inline int
+static inline void
slow_kernel_write(struct io_mapping *mapping,
loff_t gtt_base, int gtt_offset,
struct page *user_page, int user_offset,
int length)
{
- char *src_vaddr, *dst_vaddr;
- unsigned long unwritten;
+ char __iomem *dst_vaddr;
+ char *src_vaddr;
- dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
- src_vaddr = kmap_atomic(user_page, KM_USER1);
- unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
- kunmap_atomic(src_vaddr, KM_USER1);
- io_mapping_unmap_atomic(dst_vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
+ dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+ src_vaddr = kmap(user_page);
+
+ memcpy_toio(dst_vaddr + gtt_offset,
+ src_vaddr + user_offset,
+ length);
+
+ kunmap(user_page);
+ io_mapping_unmap(dst_vaddr);
}
static inline int
@@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- goto out_unpin_object;
+ slow_kernel_write(dev_priv->mm.gtt_mapping,
+ gtt_page_base, gtt_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
remain -= page_length;
offset += page_length;
@@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
page_length = PAGE_SIZE - data_page_offset;
if (do_bit17_swizzling) {
- ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 0);
- } else {
- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
- page_length);
+ page_length,
+ 0);
+ } else {
+ slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
}
- if (ret)
- goto fail_put_pages;
remain -= page_length;
data_ptr += page_length;
@@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (obj_priv->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0) {
+ dev->gtt_total != 0 &&
+ obj->write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
if (ret == -EFAULT) {
ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -1484,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
}
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ BUG_ON(ring == NULL);
+ obj_priv->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) {
@@ -1497,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
}
/* Move from whatever list we were on to the tail of execution. */
spin_lock(&dev_priv->mm.active_list_lock);
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.active_list);
+ list_move_tail(&obj_priv->list, &ring->active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1551,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
obj_priv->last_rendering_seqno = 0;
+ obj_priv->ring = NULL;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@@ -1560,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
static void
i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains, uint32_t seqno)
+ uint32_t flush_domains, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv, *next;
@@ -1571,12 +1544,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
+ obj->write_domain &&
+ obj_priv->ring->ring_flag == ring->ring_flag) {
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno);
+ i915_gem_object_move_to_active(obj, seqno, ring);
/* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
}
-#define PIPE_CONTROL_FLUSH(addr) \
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
- PIPE_CONTROL_DEPTH_STALL); \
- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
- OUT_RING(0); \
- OUT_RING(0); \
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains)
+ uint32_t flush_domains, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_file_private *i915_file_priv = NULL;
struct drm_i915_gem_request *request;
uint32_t seqno;
int was_empty;
- RING_LOCALS;
if (file_priv != NULL)
i915_file_priv = file_priv->driver_priv;
@@ -1626,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (request == NULL)
return 0;
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
- */
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
-
- if (HAS_PIPE_CONTROL(dev)) {
- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
-
- /*
- * Workaround qword write incoherence by flushing the
- * PIPE_NOTIFY buffers out to memory before requesting
- * an interrupt.
- */
- BEGIN_LP_RING(32);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
-
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- DRM_DEBUG_DRIVER("%d\n", seqno);
+ seqno = ring->add_request(dev, ring, file_priv, flush_domains);
request->seqno = seqno;
+ request->ring = ring;
request->emitted_jiffies = jiffies;
- was_empty = list_empty(&dev_priv->mm.request_list);
- list_add_tail(&request->list, &dev_priv->mm.request_list);
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list);
+
if (i915_file_priv) {
list_add_tail(&request->client_list,
&i915_file_priv->mm.request_list);
@@ -1693,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* domain we're flushing with our flush.
*/
if (flush_domains != 0)
- i915_gem_process_flushing_list(dev, flush_domains, seqno);
+ i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
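With this hunk a request is bound to the ring that generated it: the seqno comes from ring->add_request() and the request is queued on ring->request_list instead of the old global mm.request_list. A hedged sketch of the caller-side pattern, using only the signature introduced above (the flush domains are illustrative):

	/* Sketch: emit a request on the render ring after a GPU flush.
	 * i915_add_request() returns 0 if the request could not be
	 * allocated, so callers treat a zero seqno as failure.
	 */
	uint32_t seqno;

	seqno = i915_add_request(dev, file_priv, I915_GEM_GPU_DOMAINS,
				 &dev_priv->render_ring);
	if (seqno == 0)
		return -ENOMEM;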
@@ -1710,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* before signalling the CPU
*/
static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
uint32_t flush_domains = 0;
- RING_LOCALS;
/* The sampler always gets flushed on i965 (sigh) */
if (IS_I965G(dev))
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
+
+ ring->flush(dev, ring,
+ I915_GEM_DOMAIN_COMMAND, flush_domains);
return flush_domains;
}
@@ -1743,11 +1649,11 @@ i915_gem_retire_request(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&dev_priv->mm.active_list)) {
+ while (!list_empty(&request->ring->active_list)) {
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ obj_priv = list_first_entry(&request->ring->active_list,
struct drm_i915_gem_object,
list);
obj = &obj_priv->base;
@@ -1794,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (HAS_PIPE_CONTROL(dev))
- return ((volatile u32 *)(dev_priv->seqno_page))[0];
- else
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ return ring->get_gem_seqno(dev, ring);
}
/**
* This function clears the request list as sequence numbers are passed.
*/
void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+ if (!ring->status_page.page_addr
+ || list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev);
+ seqno = i915_get_gem_seqno(dev, ring);
- while (!list_empty(&dev_priv->mm.request_list)) {
+ while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
uint32_t retiring_seqno;
- request = list_first_entry(&dev_priv->mm.request_list,
+ request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
retiring_seqno = request->seqno;
@@ -1840,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev)
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- i915_user_irq_put(dev);
+
+ ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0;
}
}
@@ -1856,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
if (!dev_priv->mm.suspended &&
- !list_empty(&dev_priv->mm.request_list))
+ (!list_empty(&dev_priv->render_ring.request_list) ||
+ (HAS_BSD(dev) &&
+ !list_empty(&dev_priv->bsd_ring.request_list))))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ int interruptible, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1875,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1889,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
trace_i915_gem_request_wait_begin(dev, seqno);
- dev_priv->mm.waiting_gem_seqno = seqno;
- i915_user_irq_get(dev);
+ ring->waiting_gem_seqno = seqno;
+ ring->user_irq_get(dev, ring);
if (interruptible)
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(
+ ring->get_gem_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
else
- wait_event(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(
+ ring->get_gem_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- i915_user_irq_put(dev);
- dev_priv->mm.waiting_gem_seqno = 0;
+ ring->user_irq_put(dev, ring);
+ ring->waiting_gem_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
@@ -1910,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
+ __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -1918,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, ring);
return ret;
}
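The wait side is ring-aware as well: the waiter now sleeps on ring->irq_queue and the completion check goes through ring->get_gem_seqno(). A minimal sketch of waiting for an object's last request, assuming the obj_priv->ring field added earlier in i915_drv.h:

	/* Sketch: block (interruptibly) until the object's most recent
	 * request on its own ring has completed.
	 */
	if (obj_priv->active && obj_priv->last_rendering_seqno != 0) {
		ret = i915_do_wait_request(dev,
					   obj_priv->last_rendering_seqno,
					   1 /* interruptible */,
					   obj_priv->ring);
		if (ret)
			return ret;
	}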
@@ -1928,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
* request and object lists appropriately for that event.
*/
static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+ struct intel_ring_buffer *ring)
{
- return i915_do_wait_request(dev, seqno, 1);
+ return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
@@ -1939,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev,
uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
- trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
- invalidate_domains, flush_domains);
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
+ dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+ invalidate_domains,
+ flush_domains);
+
+ if (HAS_BSD(dev))
+ dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+ invalidate_domains,
+ flush_domains);
+}
- if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
- * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring)
+{
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+ ring->flush(dev, ring,
+ invalidate_domains,
+ flush_domains);
}
/**
@@ -2030,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ ret = i915_wait_request(dev,
+ obj_priv->last_rendering_seqno, obj_priv->ring);
if (ret != 0)
return ret;
}
@@ -2146,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- uint32_t seqno;
+ uint32_t seqno1, seqno2;
+ int ret;
spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
+ lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev) ||
+ list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
@@ -2158,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
+ seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->render_ring);
+ if (seqno1 == 0)
return -ENOMEM;
+ ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev)) {
+ seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->bsd_ring);
+ if (seqno2 == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+ if (ret)
+ return ret;
+ }
+
- return i915_wait_request(dev, seqno);
+ return ret;
}
static int
@@ -2175,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
@@ -2195,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!lists_empty);
@@ -2209,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
struct drm_gem_object *obj;
int ret;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
for (;;) {
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, bsd_ring);
/* If there's an inactive buffer available now, grab it
* and be done.
@@ -2234,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
* things, wait for the next to finish and hopefully leave us
* a buffer to evict.
*/
- if (!list_empty(&dev_priv->mm.request_list)) {
+ if (!list_empty(&render_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&render_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&dev_priv->mm.request_list,
+ request = list_first_entry(&bsd_ring->request_list,
struct drm_i915_gem_request,
list);
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
if (ret)
return ret;
@@ -2268,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
if (obj != NULL) {
uint32_t seqno;
- i915_gem_flush(dev,
+ i915_gem_flush_ring(dev,
+ obj->write_domain,
obj->write_domain,
- obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ obj_priv->ring);
+ seqno = i915_add_request(dev, NULL,
+ obj->write_domain,
+ obj_priv->ring);
if (seqno == 0)
return -ENOMEM;
continue;
@@ -2299,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
struct inode *inode;
struct page *page;
+ BUG_ON(obj_priv->pages_refcount
+ == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
+
if (obj_priv->pages_refcount++ != 0)
return 0;
@@ -2697,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return -EINVAL;
}
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->size > dev->gtt_total) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
@@ -2807,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
@@ -2814,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- (void) i915_add_request(dev, NULL, obj->write_domain);
+ (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
BUG_ON(obj->write_domain);
trace_i915_gem_object_change_domain(obj,
@@ -2954,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
DRM_INFO("%s: object %p wait for seqno %08x\n",
__func__, obj, obj_priv->last_rendering_seqno);
#endif
- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ ret = i915_do_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ 0,
+ obj_priv->ring);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_cpu_write_domain(obj);
+
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
@@ -3354,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
obj_priv->tiling_mode != I915_TILING_NONE;
/* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_gem_object_fence_offset_ok(obj,
- obj_priv->tiling_mode))
- i915_gem_object_unbind(obj);
+ if (need_fence &&
+ !i915_gem_object_fence_offset_ok(obj,
+ obj_priv->tiling_mode)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3370,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
if (need_fence) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
i915_gem_object_unpin(obj);
return ret;
}
@@ -3545,62 +3478,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return 0;
}
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- RING_LOCALS;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
-
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
- }
-
- /* XXX breadcrumb */
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3629,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- ret = i915_wait_request(dev, request->seqno);
+ ret = i915_wait_request(dev, request->seqno, request->ring);
if (ret != 0)
break;
}
@@ -3786,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
+ struct intel_ring_buffer *ring = NULL;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
+ if (args->flags & I915_EXEC_BSD) {
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with wrong flag\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->bsd_ring;
+ } else {
+ ring = &dev_priv->render_ring;
+ }
+
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3902,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret != -ENOSPC || pin_tries >= 1) {
if (ret != -ERESTARTSYS) {
unsigned long long total_size = 0;
- for (i = 0; i < args->buffer_count; i++)
+ int num_fences = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ obj_priv = object_list[i]->driver_private;
+
total_size += object_list[i]->size;
- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ num_fences +=
+ exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+ }
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
pinned+1, args->buffer_count,
- total_size, ret);
+ total_size, num_fences,
+ ret);
DRM_ERROR("%d objects [%d pinned], "
"%d object bytes [%d pinned], "
"%d/%d gtt bytes\n",
@@ -3976,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
(void)i915_add_request(dev, file_priv,
- dev->flush_domains);
+ dev->flush_domains,
+ &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
+ }
}
for (i = 0; i < args->buffer_count; i++) {
@@ -4015,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
#endif
/* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+ ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+ cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -4025,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
- flush_domains = i915_retire_commands(dev);
+ flush_domains = i915_retire_commands(dev, ring);
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4036,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space).
*/
- seqno = i915_add_request(dev, file_priv, flush_domains);
+ seqno = i915_add_request(dev, file_priv, flush_domains, ring);
BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ obj_priv = to_intel_bo(obj);
- i915_gem_object_move_to_active(obj, seqno);
+ i915_gem_object_move_to_active(obj, seqno, ring);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@@ -4153,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = 0;
+ exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
if (!ret) {
@@ -4239,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
+ BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+
i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (obj_priv->gtt_space != NULL) {
+ if (alignment == 0)
+ alignment = i915_gem_get_gtt_alignment(obj);
+ if (obj_priv->gtt_offset & (alignment - 1)) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+ }
+ }
+
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret)
@@ -4392,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = dev->dev_private;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
@@ -4406,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* actually unmasked, and our working set ends up being larger than
* required.
*/
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
@@ -4573,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+ if (dev_priv->mm.suspended ||
+ (dev_priv->render_ring.gem_object == NULL) ||
+ (HAS_BSD(dev) &&
+ dev_priv->bsd_ring.gem_object == NULL)) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4654,71 +4580,6 @@ err:
return ret;
}
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev))
- return 0;
-
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- ret = -ENOMEM;
- goto err;
- }
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- goto err_unref;
- }
-
- dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
- dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
- if (dev_priv->hw_status_page == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- ret = -EINVAL;
- goto err_unpin;
- }
-
- if (HAS_PIPE_CONTROL(dev)) {
- ret = i915_gem_init_pipe_control(dev);
- if (ret)
- goto err_unpin;
- }
-
- dev_priv->hws_obj = obj;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- if (IS_GEN6(dev)) {
- I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA_GEN6); /* posting read */
- } else {
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
- }
- DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
-err:
- return 0;
-}
static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4737,146 +4598,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
dev_priv->seqno_page = NULL;
}
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- if (dev_priv->hws_obj == NULL)
- return;
-
- obj = dev_priv->hws_obj;
- obj_priv = to_intel_bo(obj);
-
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->hws_obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- dev_priv->hw_status_page = NULL;
-
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
-
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
int ret;
- u32 head;
-
- ret = i915_gem_init_hws(dev);
- if (ret != 0)
- return ret;
- obj = i915_gem_alloc_object(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- i915_gem_cleanup_hws(dev);
- return -ENOMEM;
- }
- obj_priv = to_intel_bo(obj);
+ dev_priv->render_ring = render_ring;
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return ret;
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
+ memset(dev_priv->render_ring.status_page.page_addr,
+ 0, PAGE_SIZE);
}
- /* Set up the kernel mapping for the ring. */
- ring->Size = obj->size;
-
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
- ring->map.size = obj->size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return -EINVAL;
- }
- ring->ring_obj = obj;
- ring->virtual_start = ring->map.handle;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
- DRM_ERROR("Ring head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- I915_WRITE(PRB0_HEAD, 0);
-
- DRM_ERROR("Ring head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- }
-
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* If the head is still not zero, the ring is dead */
- if (head != 0) {
- DRM_ERROR("Ring initialization failed "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- return -EIO;
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ return ret;
}
- /* Update our cache of the ring state */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- }
+ ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ if (ret)
+ goto cleanup_pipe_control;
- if (IS_I9XX(dev) && !IS_GEN3(dev)) {
- I915_WRITE(MI_MODE,
- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ if (HAS_BSD(dev)) {
+ dev_priv->bsd_ring = bsd_ring;
+ ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (ret)
+ goto cleanup_render_ring;
}
return 0;
+
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+cleanup_pipe_control:
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+ return ret;
}
void
@@ -4884,17 +4645,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->ring.ring_obj == NULL)
- return;
-
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_gem_object_unreference(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
- i915_gem_cleanup_hws(dev);
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
}
int
@@ -4922,12 +4677,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
spin_lock(&dev_priv->mm.active_list_lock);
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+ BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
@@ -4966,18 +4723,20 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+ if (HAS_BSD(dev)) {
+ INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+ }
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
-
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
@@ -5209,7 +4968,9 @@ i915_gpu_is_active(struct drm_device *dev)
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list);
+ list_empty(&dev_priv->render_ring.active_list);
+ if (HAS_BSD(dev))
+ lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
return !lists_empty;
@@ -5254,8 +5015,10 @@ rescan:
continue;
spin_unlock(&shrink_list_lock);
+ i915_gem_retire_requests(dev, &dev_priv->render_ring);
- i915_gem_retire_requests(dev);
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8c3f080..2479be0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -53,7 +53,7 @@
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
PIPE_VBLANK_INTERRUPT_STATUS)
@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline void
+void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -278,10 +278,9 @@ static void i915_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
- u16 rgvswctl;
u8 new_delay = dev_priv->cur_delay;
- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
busy_down = I915_READ(RCPREVBSYTDNAVG);
max_avg = I915_READ(RCBMAXAVG);
@@ -300,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
new_delay = dev_priv->min_delay;
}
- DRM_DEBUG("rps change requested: %d -> %d\n",
- dev_priv->cur_delay, new_delay);
-
- rgvswctl = I915_READ(MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_ERROR("gpu busy, RCS change rejected\n");
- return; /* still busy with another command */
- }
-
- /* Program the new state */
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE(MEMSWCTL, rgvswctl);
- POSTING_READ(MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
-
- dev_priv->cur_delay = new_delay;
-
- DRM_DEBUG("rps changed\n");
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->cur_delay = new_delay;
return;
}
@@ -331,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
struct drm_i915_master_private *master_priv;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -354,13 +335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_PIPE_NOTIFY) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
+ u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+ render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
@@ -388,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
}
if (de_iir & DE_PCU_EVENT) {
- I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
i915_handle_rps_change(dev);
}
@@ -536,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
*/
bbaddr = 0;
head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring = (u32 *)(dev_priv->ring.virtual_start + head);
+ ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
- while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
bbaddr = i915_get_bbaddr(dev, ring);
if (bbaddr)
break;
}
if (bbaddr == 0) {
- ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
- while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ ring = (u32 *)(dev_priv->render_ring.virtual_start
+ + dev_priv->render_ring.size);
+ while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
bbaddr = i915_get_bbaddr(dev, ring);
if (bbaddr)
break;
@@ -587,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- error->seqno = i915_get_gem_seqno(dev);
+ error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
@@ -615,7 +600,9 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->render_ring.active_list, list) {
+
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -639,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev)
error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
/* Record the ringbuffer */
- error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+ error->ringbuffer = i915_error_object_create(dev,
+ dev_priv->render_ring.gem_object);
/* Record buffers on the active list. */
error->active_bo = NULL;
@@ -651,7 +639,8 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->render_ring.active_list, list) {
struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
@@ -703,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev)
i915_error_state_free(dev, error);
}
-/**
- * i915_handle_error - handle an error interrupt
- * @dev: drm device
- *
- * Do some basic checking of regsiter state at error interrupt time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
- i915_capture_error_state(dev);
+ if (!eir)
+ return;
printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
eir);
@@ -766,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
+ u32 pipea_stats = I915_READ(PIPEASTAT);
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
+
printk(KERN_ERR "memory refresh error\n");
printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
pipea_stats);
@@ -822,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+static void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_capture_error_state(dev);
+ i915_report_and_clear_eir(dev);
if (wedged) {
atomic_set(&dev_priv->mm.wedged, 1);
@@ -829,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -848,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
atomic_inc(&dev_priv->irq_received);
@@ -928,14 +928,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
+ u32 seqno =
+ render_ring->get_gem_seqno(dev, render_ring);
+ render_ring->irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
intel_prepare_page_flip(dev, 0);
@@ -984,7 +988,6 @@ static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
i915_kernel_lost_context(dev);
@@ -1006,43 +1009,13 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
-void i915_user_irq_get(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
if (dev_priv->trace_irq_seqno == 0)
- i915_user_irq_get(dev);
+ render_ring->user_irq_get(dev, render_ring);
dev_priv->trace_irq_seqno = seqno;
}
@@ -1052,6 +1025,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1065,10 +1039,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- i915_user_irq_get(dev);
- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+ render_ring->user_irq_get(dev, render_ring);
+ DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
- i915_user_irq_put(dev);
+ render_ring->user_irq_put(dev, render_ring);
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1087,7 +1061,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->ring.virtual_start) {
+ if (!dev_priv || !dev_priv->render_ring.virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1233,9 +1207,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+struct drm_i915_gem_request *
+i915_get_tail_request(struct drm_device *dev)
+{
drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+ return list_entry(dev_priv->render_ring.request_list.prev,
+ struct drm_i915_gem_request, list);
}
/**
@@ -1260,8 +1237,10 @@ void i915_hangcheck_elapsed(unsigned long data)
acthd = I915_READ(ACTHD_I965);
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->mm.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+ if (list_empty(&dev_priv->render_ring.request_list) ||
+ i915_seqno_passed(i915_get_gem_seqno(dev,
+ &dev_priv->render_ring),
+ i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
return;
}
@@ -1314,7 +1293,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_PIPE_NOTIFY;
+ u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
@@ -1328,7 +1307,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
(void) I915_READ(DEIER);
 /* user interrupt should be enabled, but masked initially */
- dev_priv->gt_irq_mask_reg = 0xffffffff;
+ dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1391,7 +1370,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+ if (HAS_BSD(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
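
For reference, a minimal sketch of how a caller waits for a render-ring breadcrumb under the per-ring interface used above. The helper name wait_render_seqno() is hypothetical; user_irq_get/put, irq_queue, get_gem_seqno and i915_seqno_passed are taken from this patch and the surrounding driver code.

	/* Hypothetical helper: block until the render ring reaches 'seqno'. */
	static int wait_render_seqno(struct drm_device *dev, u32 seqno)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		struct intel_ring_buffer *ring = &dev_priv->render_ring;
		int ret = 0;

		ring->user_irq_get(dev, ring);	/* enable MI_USER_INTERRUPT delivery */
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno));
		ring->user_irq_put(dev, ring);	/* drop the interrupt reference */

		return ret;
	}
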
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f3e39cc..64b0a3a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,6 +334,7 @@
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
+#define I915_BSD_USER_INTERRUPT (1<<25)
#define EIR 0x020b0
#define EMR 0x020b4
#define ESR 0x020b8
@@ -368,6 +369,36 @@
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+/* GEN6 interrupt control */
+#define GEN6_RENDER_HWSTAM 0x2098
+#define GEN6_RENDER_IMR 0x20a8
+#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
+#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
+#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
+#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
+#define GEN6_RENDER_SYNC_STATUS (1 << 2)
+#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
+#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
+
+#define GEN6_BLITTER_HWSTAM 0x22098
+#define GEN6_BLITTER_IMR 0x220a8
+#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
+#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
+#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
+#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+/*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+
+#define BSD_RING_TAIL 0x04030
+#define BSD_RING_HEAD 0x04034
+#define BSD_RING_START 0x04038
+#define BSD_RING_CTL 0x0403c
+#define BSD_RING_ACTHD 0x04074
+#define BSD_HWS_PGA 0x04080
/*
* Framebuffer compression (915+ only)
@@ -805,6 +836,10 @@
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL 0x101a8
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
@@ -826,6 +861,12 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define TR1 0x11006
+#define TSFS 0x11020
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
#define CRSTANDVID 0x11100
#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define PXVFREQ_PX_MASK 0x7f000000
@@ -964,6 +1005,41 @@
#define MEMSTAT_SRC_CTL_STDBY 3
#define RCPREVBSYTUPAVG 0x113b8
#define RCPREVBSYTDNAVG 0x113bc
+#define SDEW 0x1124c
+#define CSIEW0 0x11250
+#define CSIEW1 0x11254
+#define CSIEW2 0x11258
+#define PEW 0x1125c
+#define DEW 0x11270
+#define MCHAFE 0x112c0
+#define CSIEC 0x112e0
+#define DMIEC 0x112e4
+#define DDREC 0x112e8
+#define PEG0EC 0x112ec
+#define PEG1EC 0x112f0
+#define GFXEC 0x112f4
+#define RPPREVBSYTUPAVG 0x113b8
+#define RPPREVBSYTDNAVG 0x113bc
+#define ECR 0x11600
+#define ECR_GPFE (1<<31)
+#define ECR_IMONE (1<<30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 0x11608
+#define OGW1 0x1160c
+#define EG0 0x11610
+#define EG1 0x11614
+#define EG2 0x11618
+#define EG3 0x1161c
+#define EG4 0x11620
+#define EG5 0x11624
+#define EG6 0x11628
+#define EG7 0x1162c
+#define PXW 0x11664
+#define PXWL 0x11680
+#define LCFUSE02 0x116c0
+#define LCFUSE_HIV_MASK 0x000000ff
+#define CSIPLL0 0x12c10
+#define DDRMPLL1 0x12c20
#define PEG_BAND_GAP_DATA 0x14d68
/*
@@ -1055,7 +1131,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
-#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -2355,6 +2430,8 @@
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
+#define GT_BSD_USER_INTERRUPT (1 << 5)
+
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -2690,6 +2767,9 @@
#define SDVO_ENCODING (0)
#define TMDS_ENCODING (2 << 10)
#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
+/* CPT */
+#define HDMI_MODE_SELECT (1 << 9)
+#define DVI_MODE_SELECT (0)
#define SDVOB_BORDER_ENABLE (1 << 7)
#define AUDIO_ENABLE (1 << 6)
#define VSYNC_ACTIVE_HIGH (1 << 4)
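
The BSD_RING_* block above deliberately mirrors the PRB0 render-ring registers (head, tail, start, control, plus a hardware status page), so the common ring setup added later in this diff can program either engine. A rough sketch of that sequence follows; gtt_offset and status_page_gfx_addr are illustrative placeholders, not variables in the driver.

	/* Illustrative only: bring up the BSD ring the same way init_ring_common()
	 * later in this patch handles the render ring (32-page ring assumed). */
	I915_WRITE(BSD_RING_CTL, 0);			/* stop the ring */
	I915_WRITE(BSD_RING_HEAD, 0);
	I915_WRITE(BSD_RING_TAIL, 0);
	I915_WRITE(BSD_RING_START, gtt_offset);		/* ring buffer base in the GTT */
	I915_WRITE(BSD_RING_CTL,
		   ((32 * PAGE_SIZE - PAGE_SIZE) & RING_NR_PAGES) |
		   RING_NO_REPORT | RING_VALID);
	I915_WRITE(BSD_HWS_PGA, status_page_gfx_addr);	/* hardware status page */
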
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e4c45f..fab2176 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind,
__entry->obj, __entry->gtt_offset)
);
-TRACE_EVENT(i915_gem_object_clflush,
-
- TP_PROTO(struct drm_gem_object *obj),
-
- TP_ARGS(obj),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- ),
-
- TP_printk("obj=%p", __entry->obj)
-);
-
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
@@ -132,6 +115,13 @@ DECLARE_EVENT_CLASS(i915_gem_object,
TP_printk("obj=%p", __entry->obj)
);
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj)
+);
+
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
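
The change above converts the standalone clflush TRACE_EVENT into a DEFINE_EVENT that reuses the existing i915_gem_object event class, so the entry layout, assignment and printk format are written only once. A sketch of how a further single-object tracepoint could be added under the same class; the event name i915_gem_object_example is hypothetical.

	DEFINE_EVENT(i915_gem_object, i915_gem_object_example,

		     TP_PROTO(struct drm_gem_object *obj),

		     TP_ARGS(obj)
	);
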
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4c748d8..96f75d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e16ac5a..22ff384 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -217,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 hotplug_en;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
int i, tries = 0;
if (HAS_PCH_SPLIT(dev))
@@ -232,8 +233,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
tries = 2;
else
tries = 1;
- hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- hotplug_en &= CRT_FORCE_HOTPLUG_MASK;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= CRT_HOTPLUG_MASK;
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
if (IS_G4X(dev))
@@ -255,11 +256,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
} while (time_after(timeout, jiffies));
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
- CRT_HOTPLUG_MONITOR_NONE)
- return true;
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
- return false;
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
}
static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
@@ -569,7 +576,7 @@ void intel_crt_init(struct drm_device *dev)
(1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- connector->interlace_allowed = 0;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
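
Condensed, the reworked hotplug probe saves the trigger register, forces a detection cycle, samples the status, then acks the forced event and restores the original bits. A sketch of that flow using the register and bit names from the hunk above; the wait loop and the detected local are trimmed placeholders.

	u32 orig = I915_READ(PORT_HOTPLUG_EN);
	bool detected;

	I915_WRITE(PORT_HOTPLUG_EN,
		   (orig & CRT_HOTPLUG_MASK) | CRT_HOTPLUG_FORCE_DETECT);
	/* ... wait for CRT_HOTPLUG_FORCE_DETECT to self-clear ... */
	detected = (I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK)
			!= CRT_HOTPLUG_MONITOR_NONE;
	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);	/* ack the forced event */
	I915_WRITE(PORT_HOTPLUG_EN, orig);			/* put the bits back */
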
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84..cc8131f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1029,19 +1029,28 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
return;
+ if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+ return; /* Already off, just return */
+
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
- ; /* nothing */
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+ if (time_after(jiffies, timeout)) {
+ DRM_DEBUG_DRIVER("FBC idle timed out\n");
+ break;
+ }
+ ; /* do nothing */
+ }
intel_wait_for_vblank(dev);
@@ -1239,10 +1248,11 @@ static void intel_update_fbc(struct drm_crtc *crtc,
return;
out_disable:
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev))
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
+ }
}
static int
@@ -1386,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start = obj_priv->gtt_offset;
Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, crtc->fb->pitch);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
@@ -2345,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
}
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -2629,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
struct cxsr_latency {
int is_desktop;
+ int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
@@ -2638,33 +2652,45 @@ struct cxsr_latency {
};
static struct cxsr_latency cxsr_latency_table[] = {
- {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
-
- {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
-
- {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
-
- {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
-
- {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
-
- {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
- int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+ int fsb, int mem)
{
int i;
struct cxsr_latency *latency;
@@ -2675,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
return latency;
}
@@ -2789,8 +2816,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
struct cxsr_latency *latency;
int sr_clock;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
pineview_disable_cxsr(dev);
@@ -3626,6 +3653,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+
/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
@@ -3772,6 +3804,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vdisplay -= 1;
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_start -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ adjusted_mode->crtc_vsync_end -= 1;
+ adjusted_mode->crtc_vsync_start -= 1;
+ } else
+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+
I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -3934,6 +3978,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
+
+ ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
addr = obj_priv->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
@@ -3977,6 +4028,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_bo = bo;
return 0;
+fail_unpin:
+ i915_gem_object_unpin(bo);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
@@ -4436,6 +4489,8 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
+ i915_update_gfx_val(dev_priv);
+
if (IS_I945G(dev) || IS_I945GM(dev)) {
DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
@@ -4564,12 +4619,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
- if (work && !work->pending) {
- obj_priv = to_intel_bo(work->pending_flip_obj);
- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
- obj_priv,
- atomic_read(&obj_priv->pending_flip));
- }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4629,14 +4678,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc;
- RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
- mutex_lock(&dev->struct_mutex);
-
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4646,10 +4692,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
intel_crtc->unpin_work = work;
@@ -4658,13 +4704,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
- to_intel_bo(obj));
- kfree(work);
- intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ kfree(work);
+
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ to_intel_bo(obj));
return ret;
}
@@ -5023,10 +5075,32 @@ err_unref:
return NULL;
}
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
int i = 0;
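
ironlake_set_drps() is reused below to restore the start frequency; elsewhere in the driver the PCU event handler steps the current state one notch at a time toward max_delay or min_delay. A rough sketch of one such step, assuming the cur_delay/max_delay fields initialised in this hunk (the handler itself is outside this excerpt).

	/* Illustrative: request one step toward the fastest state. */
	u8 new_delay = dev_priv->cur_delay - 1;
	if (new_delay < dev_priv->max_delay)
		new_delay = dev_priv->max_delay;
	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;	/* only once MEMSWCTL accepts it */
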
@@ -5045,13 +5119,21 @@ void ironlake_enable_drps(struct drm_device *dev)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
+ fstart = fmax;
+
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+ dev_priv->fmax = fstart; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fmax;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+ fstart);
+
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
/*
@@ -5073,20 +5155,19 @@ void ironlake_enable_drps(struct drm_device *dev)
}
msleep(1);
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE(MEMSWCTL, rgvswctl);
- POSTING_READ(MEMSWCTL);
+ ironlake_set_drps(dev, fstart);
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
}
void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvswctl;
- u8 fstart;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -5096,11 +5177,7 @@ void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE(MEMSWCTL, rgvswctl);
+ ironlake_set_drps(dev, dev_priv->fstart);
msleep(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
@@ -5108,6 +5185,92 @@ void ironlake_disable_drps(struct drm_device *dev)
}
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
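
The per-state weights programmed by intel_init_emon() scale roughly with voltage squared times frequency. A worked example of the arithmetic with made-up register fields; div, post, pre and vid here are illustrative values, not read from hardware.

	/* Illustrative: div = 20, post = 1, pre = 1, vid = 50. */
	unsigned long freq = (20 * 133333) / ((1 << 1) * 1);	/* 1333330 */
	unsigned long val;

	val  = 50 * 50;			/* vid * vid   = 2500      */
	val *= (freq / 1000);		/* * 1333      = 3332500   */
	val *= 255;			/*             = 849787500 */
	val /= (127 * 127 * 900);	/* / 14516100  = 58        */
	/* pxw[i] for this P-state would be 58, comfortably below 0xff. */
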
@@ -5277,11 +5440,13 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), "
+ "(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3": "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
@@ -5310,7 +5475,6 @@ static void intel_init_display(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int num_pipe;
int i;
drm_mode_config_init(dev);
@@ -5340,13 +5504,13 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
if (IS_MOBILE(dev) || IS_I9XX(dev))
- num_pipe = 2;
+ dev_priv->num_pipe = 2;
else
- num_pipe = 1;
+ dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
- num_pipe, num_pipe > 1 ? "s" : "");
+ dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
- for (i = 0; i < num_pipe; i++) {
+ for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
}
@@ -5354,8 +5518,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
- if (IS_IRONLAKE_M(dev))
+ if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6b1c9a2..49b54f0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -576,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
@@ -675,10 +675,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->link_configuration[1] = dp_priv->lane_count;
/*
- * Check for DPCD version > 1.1,
- * enable enahanced frame stuff in that case
+ * Check for DPCD version > 1.1 and enhanced framing support
*/
- if (dp_priv->dpcd[0] >= 0x11) {
+ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
@@ -1208,6 +1207,8 @@ ironlake_dp_detect(struct drm_connector *connector)
if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
+ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
return status;
}
@@ -1352,7 +1353,7 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6f53cf7..c3c5052 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -105,7 +105,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
}
/* Flush everything out, we'll be doing GTT only from now on */
- i915_gem_object_set_to_gtt_domain(fbo, 1);
+ ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
+ if (ret) {
+ DRM_ERROR("failed to bind fb: %d.\n", ret);
+ goto out_unpin;
+ }
info = framebuffer_alloc(0, device);
if (!info) {
@@ -241,6 +245,7 @@ int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (!ifbdev)
@@ -249,8 +254,13 @@ int intel_fbdev_init(struct drm_device *dev)
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
- drm_fb_helper_init(dev, &ifbdev->helper, 2,
- INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ dev_priv->num_pipe,
+ INTELFB_CONN_LIMIT);
+ if (ret) {
+ kfree(ifbdev);
+ return ret;
+ }
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
drm_fb_helper_initial_config(&ifbdev->helper, 32);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 65727f0..83bd764 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -59,8 +59,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
SDVO_VSYNC_ACTIVE_HIGH |
SDVO_HSYNC_ACTIVE_HIGH;
- if (hdmi_priv->has_hdmi_sink)
+ if (hdmi_priv->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
+ if (HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+ }
if (intel_crtc->pipe == 1) {
if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b0..d7ad513 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- RING_LOCALS;
+ drm_i915_private_t *dev_priv = dev->dev_private;
BUG_ON(overlay->active);
@@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev,
+ overlay->last_flip_req, 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
}
static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
u32 tmp;
- RING_LOCALS;
if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret == 0) {
overlay->last_flip_req = 0;
@@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- RING_LOCALS;
BUG_ON(!overlay->active);
@@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
- ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ 1, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -390,22 +396,23 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
int interruptible)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
+ drm_i915_private_t *dev_priv = dev->dev_private;
u32 flip_addr;
int ret;
- RING_LOCALS;
if (overlay->hw_wedged == HW_WEDGED)
return -EIO;
if (overlay->last_flip_req == 0) {
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
}
- ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ interruptible, &dev_priv->render_ring);
if (ret != 0)
return ret;
@@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ overlay->last_flip_req = i915_add_request(dev, NULL,
+ 0, &dev_priv->render_ring);
if (overlay->last_flip_req == 0)
return -ENOMEM;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible);
+ interruptible, &dev_priv->render_ring);
if (ret != 0)
return ret;
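
With i915_add_request() and i915_do_wait_request() now taking the target ring, the synchronous emit-and-wait pattern used throughout the overlay code reduces to the sketch below; the ring commands are shown as placeholders and the error handling mirrors the calls above.

	u32 request;
	int ret;

	BEGIN_LP_RING(2);
	OUT_RING(MI_NOOP);		/* whatever commands the caller queued */
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();

	request = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
	if (request == 0)
		return -ENOMEM;
	ret = i915_do_wait_request(dev, request, 1, &dev_priv->render_ring);
	if (ret != 0)
		return ret;
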
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..cea4f1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+static void
+render_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+#endif
+ u32 cmd;
+ trace_i915_gem_request_flush(dev, ring->next_seqno,
+ invalidate_domains, flush_domains);
+
+ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+ intel_ring_begin(dev, ring, 8);
+ intel_ring_emit(dev, ring, cmd);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 head;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ obj_priv = to_intel_bo(ring->gem_object);
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(ring->regs.ctl, 0);
+ I915_WRITE(ring->regs.head, 0);
+ I915_WRITE(ring->regs.tail, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+ head = ring->get_head(dev, ring);
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_ERROR("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+
+ I915_WRITE(ring->regs.head, 0);
+
+ DRM_ERROR("%s head forced to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+ }
+
+ I915_WRITE(ring->regs.ctl,
+ ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_NO_REPORT | RING_VALID);
+
+ head = I915_READ(ring->regs.head) & HEAD_ADDR;
+ /* If the head is still not zero, the ring is dead */
+ if (head != 0) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ(ring->regs.ctl),
+ I915_READ(ring->regs.head),
+ I915_READ(ring->regs.tail),
+ I915_READ(ring->regs.start));
+ return -EIO;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = ring->get_head(dev, ring);
+ ring->tail = ring->get_tail(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+ return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = init_ring_common(dev, ring);
+ if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ I915_WRITE(MI_MODE,
+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ }
+ return ret;
+}
+
+#define PIPE_CONTROL_FLUSH(addr) \
+do { \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL | 2); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+} while (0)
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static u32
+render_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ seqno = intel_ring_get_seqno(dev, ring);
+
+ if (IS_GEN6(dev)) {
+ BEGIN_LP_RING(6);
+ OUT_RING(GFX_OP_PIPE_CONTROL | 3);
+ OUT_RING(PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+ return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ else
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+ if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ else
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (IS_GEN6(dev)) {
+ I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+ I915_READ(HWS_PGA_GEN6); /* posting read */
+ } else {
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(HWS_PGA); /* posting read */
+ }
+
+}
+
+void
+bsd_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ intel_ring_begin(dev, ring, 8);
+ intel_ring_emit(dev, ring, MI_FLUSH);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return init_ring_common(dev, ring);
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+ seqno = intel_ring_get_seqno(dev, ring);
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+ return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+ (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+ return 0;
+}
+
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, cliprects, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+ intel_ring_emit(dev, ring,
+ exec_start | MI_BATCH_NON_SECURE);
+ intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+ intel_ring_emit(dev, ring, 0);
+ } else {
+ intel_ring_begin(dev, ring, 4);
+ if (IS_I965G(dev)) {
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | (2 << 6)
+ | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ } else {
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+ | (2 << 6));
+ intel_ring_emit(dev, ring, exec_start |
+ MI_BATCH_NON_SECURE);
+ }
+ }
+ intel_ring_advance(dev, ring);
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+ obj_priv = to_intel_bo(obj);
+
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ ring->status_page.obj = NULL;
+
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+static int init_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj_priv->gtt_offset;
+ ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ if (ring->status_page.page_addr == NULL) {
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ goto err_unpin;
+ }
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ ring->setup_status_page(dev, ring);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+
+int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ int ret;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ ring->dev = dev;
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(dev, ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ring->gem_object = obj;
+
+ ret = i915_gem_object_pin(obj, ring->alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ goto cleanup;
+ }
+
+ obj_priv = to_intel_bo(obj);
+ ring->map.size = ring->size;
+ ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ring->virtual_start = ring->map.handle;
+ ret = ring->init(dev, ring);
+ if (ret != 0) {
+ intel_cleanup_ring_buffer(dev, ring);
+ return ret;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = ring->get_head(dev, ring);
+ ring->tail = ring->get_tail(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ return ret;
+cleanup:
+ cleanup_status_page(dev, ring);
+ return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->gem_object == NULL)
+ return;
+
+ drm_core_ioremapfree(&ring->map, dev);
+
+ i915_gem_object_unpin(ring->gem_object);
+ drm_gem_object_unreference(ring->gem_object);
+ ring->gem_object = NULL;
+ cleanup_status_page(dev, ring);
+}
+
+int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ unsigned int *virt;
+ int rem;
+ rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = intel_wait_ring_buffer(dev, ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = (unsigned int *)(ring->virtual_start + ring->tail);
+ rem /= 4;
+ while (rem--)
+ *virt++ = MI_NOOP;
+
+ ring->tail = 0;
+
+ return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n)
+{
+ unsigned long end;
+
+ trace_i915_ring_wait_begin(dev);
+ end = jiffies + 3 * HZ;
+ do {
+ ring->head = ring->get_head(dev, ring);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end(dev);
+ return 0;
+ }
+
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
+ yield();
+ } while (!time_after(jiffies, end));
+ trace_i915_ring_wait_end(dev);
+ return -EBUSY;
+}
+
+void intel_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n)
+{
+ if (unlikely(ring->tail + n > ring->size))
+ intel_wrap_ring_buffer(dev, ring);
+ if (unlikely(ring->space < n))
+ intel_wait_ring_buffer(dev, ring, n);
+}
+
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, unsigned int data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+ ring->tail &= ring->size - 1;
+ ring->space -= 4;
+}
+
+void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ ring->advance_ring(dev, ring);
+}
+
+void intel_fill_struct(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ BUG_ON((len & (4 - 1)) != 0); /* len must be a whole number of dwords */
+ intel_ring_begin(dev, ring, len);
+ memcpy(virt, data, len);
+ ring->tail += len;
+ ring->tail &= ring->size - 1;
+ ring->space -= len;
+ intel_ring_advance(dev, ring);
+}
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ seqno = ring->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++ring->next_seqno == 0)
+ ring->next_seqno = 1;
+ return seqno;
+}
+
+struct intel_ring_buffer render_ring = {
+ .name = "render ring",
+ .regs = {
+ .ctl = PRB0_CTL,
+ .head = PRB0_HEAD,
+ .tail = PRB0_TAIL,
+ .start = PRB0_START
+ },
+ .ring_flag = I915_EXEC_RENDER,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .next_seqno = 1,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = render_setup_status_page,
+ .init = init_render_ring,
+ .get_head = render_ring_get_head,
+ .get_tail = render_ring_get_tail,
+ .get_active_head = render_ring_get_active_head,
+ .advance_ring = render_ring_advance_ring,
+ .flush = render_ring_flush,
+ .add_request = render_ring_add_request,
+ .get_gem_seqno = render_ring_get_gem_seqno,
+ .user_irq_get = render_ring_get_user_irq,
+ .user_irq_put = render_ring_put_user_irq,
+ .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
+
+/* ring buffer for bit-stream decoder */
+
+struct intel_ring_buffer bsd_ring = {
+ .name = "bsd ring",
+ .regs = {
+ .ctl = BSD_RING_CTL,
+ .head = BSD_RING_HEAD,
+ .tail = BSD_RING_TAIL,
+ .start = BSD_RING_START
+ },
+ .ring_flag = I915_EXEC_BSD,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .next_seqno = 1,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = bsd_setup_status_page,
+ .init = init_bsd_ring,
+ .get_head = bsd_ring_get_head,
+ .get_tail = bsd_ring_get_tail,
+ .get_active_head = bsd_ring_get_active_head,
+ .advance_ring = bsd_ring_advance_ring,
+ .flush = bsd_ring_flush,
+ .add_request = bsd_ring_add_request,
+ .get_gem_seqno = bsd_ring_get_gem_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+ .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
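
A minimal stand-alone sketch (not driver code; the struct and helper names here are invented for illustration) of the space accounting the ring functions above rely on: free space is head minus (tail + 8), wrapped by the ring size, and each emitted dword advances the tail under a power-of-two mask.

#include <stdio.h>

#define RING_SIZE (32 * 4096)		/* matches .size = 32 * PAGE_SIZE above */

struct toy_ring {
	unsigned int head, tail, space;
};

static void toy_ring_update_space(struct toy_ring *r)
{
	int space = r->head - (r->tail + 8);

	if (space < 0)
		space += RING_SIZE;
	r->space = space;
}

static void toy_ring_emit(struct toy_ring *r, unsigned int data)
{
	(void)data;			/* the real code writes to virtual_start + tail */
	r->tail += 4;
	r->tail &= RING_SIZE - 1;	/* wrap; the size is a power of two */
	r->space -= 4;
}

int main(void)
{
	struct toy_ring r = { .head = 64, .tail = RING_SIZE - 8, .space = 0 };

	toy_ring_update_space(&r);
	printf("space before emit: %u\n", r.space);
	toy_ring_emit(&r, 0);		/* e.g. an MI_NOOP */
	printf("tail wrapped to %u, space now %u\n", r.tail, r.space);
	return 0;
}
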
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..d5568d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct intel_hw_status_page {
+ void *page_addr;
+ unsigned int gfx_addr;
+ struct drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct intel_ring_buffer {
+ const char *name;
+ struct ring_regs {
+ u32 ctl;
+ u32 head;
+ u32 tail;
+ u32 start;
+ } regs;
+ unsigned int ring_flag;
+ unsigned long size;
+ unsigned int alignment;
+ void *virtual_start;
+ struct drm_device *dev;
+ struct drm_gem_object *gem_object;
+
+ unsigned int head;
+ unsigned int tail;
+ unsigned int space;
+ u32 next_seqno;
+ struct intel_hw_status_page status_page;
+
+ u32 irq_gem_seqno; /* last seq seen at irq time */
+ u32 waiting_gem_seqno;
+ int user_irq_refcount;
+ void (*user_irq_get)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*user_irq_put)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*setup_status_page)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ int (*init)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ unsigned int (*get_head)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ unsigned int (*get_tail)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ unsigned int (*get_active_head)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*advance_ring)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ void (*flush)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ u32 (*add_request)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains);
+ u32 (*get_gem_seqno)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+ int (*dispatch_gem_execbuffer)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset);
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ wait_queue_head_t irq_queue;
+ drm_local_map_t map;
+};
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+ int reg)
+{
+ u32 *regs = ring->status_page.page_addr;
+ return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring, int n);
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ void *data,
+ unsigned int len);
+void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aba72c4..76993ac 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1479,7 +1479,7 @@ intel_find_analog_connector(struct drm_device *dev)
intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector && encoder == intel_attached_encoder(connector))
+ if (encoder == intel_attached_encoder(connector))
return connector;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index e13f6af..d4bcca8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -34,7 +34,7 @@
static struct nouveau_dsm_priv {
bool dsm_detected;
acpi_handle dhandle;
- acpi_handle dsm_handle;
+ acpi_handle rom_handle;
} nouveau_dsm_priv;
static const char nouveau_dsm_muid[] = {
@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
if (id == VGA_SWITCHEROO_IGD)
- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
- return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
}
static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
- return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
static int nouveau_dsm_init(void)
@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
+
status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
if (ACPI_FAILURE(status)) {
return false;
}
- ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+ ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
if (ret < 0)
return false;
nouveau_dsm_priv.dhandle = dhandle;
- nouveau_dsm_priv.dsm_handle = nvidia_handle;
return true;
}
@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void)
struct pci_dev *pdev = NULL;
int has_dsm = 0;
int vga_count = 0;
+
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void)
}
if (vga_count == 2 && has_dsm) {
- acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
@@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void)
{
vga_switcheroo_unregister_handler();
}
+
+/* retrieve the ROM in 4k blocks */
+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object rom_arg_elements[2], *obj;
+ struct acpi_object_list rom_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ rom_arg.count = 2;
+ rom_arg.pointer = &rom_arg_elements[0];
+
+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[0].integer.value = offset;
+
+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, len);
+ kfree(buffer.pointer);
+ return len;
+}
+
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+ if (!nouveau_dsm_priv.dsm_detected)
+ return false;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ nouveau_dsm_priv.rom_handle = rom_handle;
+ return true;
+}
+
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
+}
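
A user-space sketch of the chunked-fetch pattern used here: pull the VBIOS in ROM_BIOS_PAGE-sized pieces and stop on a short or failed read. get_chunk() is a hypothetical stand-in for the ACPI _ROM evaluation done by nouveau_rom_call(); the 64 KiB cap mirrors load_vbios_acpi() in nouveau_bios.c below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROM_BIOS_PAGE 4096

/* hypothetical provider; the driver evaluates the ACPI _ROM method instead */
static int get_chunk(uint8_t *bios, int offset, int len)
{
	if (offset >= 32 * 1024)	/* pretend the ROM is only 32 KiB */
		return 0;
	memset(bios + offset, 0xAA, len);
	return len;
}

int main(void)
{
	static uint8_t data[64 * 1024];
	int i, ret, total = 0;

	for (i = 0; i < (int)(sizeof(data) / ROM_BIOS_PAGE); i++) {
		ret = get_chunk(data, i * ROM_BIOS_PAGE, ROM_BIOS_PAGE);
		if (ret <= 0)
			break;
		total += ret;
	}
	printf("fetched %d bytes of VBIOS\n", total);
	return 0;
}
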
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e7e69cc..fc924b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -178,6 +178,25 @@ out:
pci_disable_rom(dev->pdev);
}
+static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+{
+ int i;
+ int ret;
+ int size = 64 * 1024;
+
+ if (!nouveau_acpi_rom_supported(dev->pdev))
+ return;
+
+ for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+ ret = nouveau_acpi_get_bios_chunk(data,
+ (i * ROM_BIOS_PAGE),
+ ROM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
+ }
+ return;
+}
+
struct methods {
const char desc[8];
void (*loadbios)(struct drm_device *, uint8_t *);
@@ -191,6 +210,7 @@ static struct methods nv04_methods[] = {
};
static struct methods nv50_methods[] = {
+ { "ACPI", load_vbios_acpi, true },
{ "PRAMIN", load_vbios_pramin, true },
{ "PROM", load_vbios_prom, false },
{ "PCIROM", load_vbios_pci, true },
@@ -814,7 +834,7 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
if (i2c_index == 0x81)
i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
- if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+ if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
return NULL;
}
@@ -2807,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
- nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
+ BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
+ offset, gpio->tag, gpio->state_default);
+ if (bios->execute)
+ nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
/* The NVIDIA binary driver doesn't appear to actually do
* any of this, my VBIOS does however.
@@ -3897,7 +3920,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
static uint8_t *
bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
- uint16_t record, int record_len, int record_nr)
+ uint16_t record, int record_len, int record_nr,
+ bool match_link)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
@@ -3905,12 +3929,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
uint16_t table;
int i, v;
+ switch (dcbent->type) {
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ case OUTPUT_DP:
+ break;
+ default:
+ match_link = false;
+ break;
+ }
+
for (i = 0; i < record_nr; i++, record += record_len) {
table = ROM16(bios->data[record]);
if (!table)
continue;
entry = ROM32(bios->data[table]);
+ if (match_link) {
+ v = (entry & 0x00c00000) >> 22;
+ if (!(v & dcbent->sorconf.link))
+ continue;
+ }
+
v = (entry & 0x000f0000) >> 16;
if (!(v & dcbent->or))
continue;
@@ -3952,7 +3992,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
*length = table[4];
return bios_output_config_match(dev, dcbent,
bios->display.dp_table_ptr + table[1],
- table[2], table[3]);
+ table[2], table[3], table[0] >= 0x21);
}
int
@@ -4041,7 +4081,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
dcbent->type, dcbent->location, dcbent->or);
otable = bios_output_config_match(dev, dcbent, table[1] +
bios->display.script_table_ptr,
- table[2], table[3]);
+ table[2], table[3], table[0] >= 0x21);
if (!otable) {
NV_ERROR(dev, "Couldn't find matching output script table\n");
return 1;
@@ -5533,12 +5573,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
- /*
- * Normal entries consist of a single bit, but dual link has the
- * next most significant bit set too
- */
- entry->duallink_possible =
- ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
switch (entry->type) {
case OUTPUT_ANALOG:
@@ -5622,6 +5656,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
break;
}
+ if (dcb->version < 0x40) {
+ /* Normal entries consist of a single bit, but dual link has
+ * the next most significant bit set too
+ */
+ entry->duallink_possible =
+ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
+ } else {
+ entry->duallink_possible = (entry->sorconf.link == 3);
+ }
+
/* unsure what DCB version introduces this, 3.0? */
if (conf & 0x100000)
entry->i2c_upper_default = true;
@@ -6205,6 +6249,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
nouveau_i2c_fini(dev, entry);
}
+static bool
+nouveau_bios_posted(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool was_locked;
+ unsigned htotal;
+
+ if (dev_priv->chipset >= NV_50) {
+ if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+ NVReadVgaCrtc(dev, 0, 0x1a) == 0)
+ return false;
+ return true;
+ }
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+ htotal = NVReadVgaCrtc(dev, 0, 0x06);
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
+ htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
+ NVLockVgaCrtcs(dev, was_locked);
+ return (htotal != 0);
+}
+
int
nouveau_bios_init(struct drm_device *dev)
{
@@ -6239,11 +6307,9 @@ nouveau_bios_init(struct drm_device *dev)
bios->execute = false;
/* ... unless card isn't POSTed already */
- if (dev_priv->card_type >= NV_10 &&
- NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
- NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
+ if (!nouveau_bios_posted(dev)) {
NV_INFO(dev, "Adaptor not initialised\n");
- if (dev_priv->card_type < NV_50) {
+ if (dev_priv->card_type < NV_40) {
NV_ERROR(dev, "Unable to POST this chipset\n");
return -ENODEV;
}
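
A stand-alone illustration of how nouveau_bios_posted() above reassembles the 12-bit CRTC horizontal total from scattered register bits; a zero total is taken to mean the card was never POSTed. crtc_read() here is a hypothetical stand-in for the NVReadVgaCrtc() calls.

#include <stdint.h>
#include <stdio.h>

/* hypothetical register store; the driver reads the real VGA CRTC registers */
static uint8_t crtc_read(int index)
{
	static const uint8_t regs[0x42] = { [0x06] = 0x5f, [0x07] = 0x01 };

	return regs[index];
}

int main(void)
{
	unsigned int htotal;

	htotal  = crtc_read(0x06);			/* bits 7:0 */
	htotal |= (crtc_read(0x07) & 0x01) << 8;	/* bit  8   */
	htotal |= (crtc_read(0x07) & 0x20) << 4;	/* bit  9   */
	htotal |= (crtc_read(0x25) & 0x01) << 10;	/* bit  10  */
	htotal |= (crtc_read(0x41) & 0x01) << 11;	/* bit  11  */

	printf("htotal = %u -> %s\n", htotal,
	       htotal ? "card looks POSTed" : "card not POSTed");
	return 0;
}
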
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 266b0ff..149ed22 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -432,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
static struct drm_display_mode *
-nouveau_connector_native_mode(struct nouveau_connector *connector)
+nouveau_connector_native_mode(struct drm_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct drm_connector_helper_funcs *helper = connector->helper_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *largest = NULL;
int high_w = 0, high_h = 0, high_v = 0;
- /* Use preferred mode if there is one.. */
- list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+ if (helper->mode_valid(connector, mode) != MODE_OK)
+ continue;
+
+ /* Use preferred mode if there is one.. */
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
NV_DEBUG_KMS(dev, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
- }
- /* Otherwise, take the resolution with the largest width, then height,
- * then vertical refresh
- */
- list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ /* Otherwise, take the resolution with the largest width, then
+ * height, then vertical refresh
+ */
if (mode->hdisplay < high_w)
continue;
@@ -553,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
*/
if (!nv_connector->native_mode)
nv_connector->native_mode =
- nouveau_connector_native_mode(nv_connector);
+ nouveau_connector_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@@ -584,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
switch (nv_encoder->dcb->type) {
case OUTPUT_LVDS:
- BUG_ON(!nv_connector->native_mode);
- if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
- mode->vdisplay > nv_connector->native_mode->vdisplay)
+ if (nv_connector->native_mode &&
+ (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay))
return MODE_PANEL;
min_clock = 0;
@@ -594,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
break;
case OUTPUT_TMDS:
if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
- (dev_priv->card_type < NV_50 &&
- !nv_encoder->dcb->duallink_possible))
+ !nv_encoder->dcb->duallink_possible)
max_clock = 165000;
else
max_clock = 330000;
@@ -729,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
if (ret == 0)
goto out;
nv_connector->detected_encoder = nv_encoder;
- nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
+ nv_connector->native_mode = nouveau_connector_native_mode(connector);
list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 49fa7b2..cb1ce2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -40,6 +40,8 @@ struct nouveau_crtc {
int sharpness;
int last_dpms;
+ int cursor_saved_x, cursor_saved_y;
+
struct {
int cpp;
bool blanked;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index c6079e3..2737704 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
nouveau_bo_unpin(nouveau_fb->nvbo);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
+
NV_INFO(dev, "Evicting buffers...\n");
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
@@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev)
nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ NV_ERROR(dev, "Could not pin/map cursor.\n");
+ }
+
if (dev_priv->card_type < NV_50) {
nv04_display_restore(dev);
NVLockVgaCrtcs(dev, false);
} else
nv50_display_init(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_offset(nv_crtc,
+ nv_crtc->cursor.nvbo->bo.offset -
+ dev_priv->vm_vram_base);
+
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
+
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5b13443..c697191 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
+#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
#endif
/* nouveau_backlight.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index fd4a2df..c9a4a0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -377,6 +377,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
+ int ret;
nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
if (!nfbdev)
@@ -386,7 +387,12 @@ int nouveau_fbcon_init(struct drm_device *dev)
dev_priv->nfbdev = nfbdev;
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
- drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ if (ret) {
+ kfree(nfbdev);
+ return ret;
+ }
+
drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
drm_fb_helper_initial_config(&nfbdev->helper, 32);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 775a701..c1fd42b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
+ dev_priv->vram_sys_base <<= 12;
}
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e632339..b02a231 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,12 +376,15 @@ out_err:
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
nouveau_pci_resume(pdev);
+ drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ drm_kms_helper_poll_disable(dev);
nouveau_pci_suspend(pdev, pmm);
}
}
@@ -776,29 +779,24 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
return ret;
}
- /* map larger RAMIN aperture on NV40 cards */
- dev_priv->ramin = NULL;
+ /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
int ramin_bar = 2;
if (pci_resource_len(dev->pdev, ramin_bar) == 0)
ramin_bar = 3;
dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
- dev_priv->ramin = ioremap(
- pci_resource_start(dev->pdev, ramin_bar),
+ dev_priv->ramin =
+ ioremap(pci_resource_start(dev->pdev, ramin_bar),
dev_priv->ramin_size);
if (!dev_priv->ramin) {
- NV_ERROR(dev, "Failed to init RAMIN mapping, "
- "limited instance memory available\n");
+ NV_ERROR(dev, "Failed to PRAMIN BAR");
+ return -ENOMEM;
}
- }
-
- /* On older cards (or if the above failed), create a map covering
- * the BAR0 PRAMIN aperture */
- if (!dev_priv->ramin) {
+ } else {
dev_priv->ramin_size = 1 * 1024 * 1024;
dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
- dev_priv->ramin_size);
+ dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
return -ENOMEM;
@@ -913,6 +911,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_PTIMER_TIME:
+ getparam->value = dev_priv->engine.timer.read(dev);
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 89a91b9..aaf3de3 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
static void
nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
NV_PRAMDAC_CU_START_POS,
XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 753e723..03ad7ab 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
struct drm_device *dev = nv_crtc->base.dev;
+ nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
((y & 0xFFFF) << 16) | (x & 0xFFFF));
/* Needed to make the cursor move. */
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index a95e694..32611bd 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -6,10 +6,16 @@
int
nv50_fb_init(struct drm_device *dev)
{
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
switch (dev_priv->chipset) {
case 0x50:
nv_wr32(dev, 0x100c90, 0x0707ff);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index c61782b..bb47ad7 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -31,7 +31,7 @@ nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- if (gpio->line > 32)
+ if (gpio->line >= 32)
return -EINVAL;
*reg = nv50_gpio_reg[gpio->line >> 3];
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b11eaf9..812778d 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
int
nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
bool dum;
@@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
uint32_t tmp;
- if (dev_priv->chipset < 0x90 ||
- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
- else
- tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
switch ((tmp & 0x00000f00) >> 8) {
case 8:
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3c91312..84b1f27 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
+
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
@@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o
+ evergreen.o evergreen_cs.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8c8e4d3..4b6623d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,7 +41,18 @@ void evergreen_fini(struct radeon_device *rdev);
void evergreen_pm_misc(struct radeon_device *rdev)
{
-
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
}
void evergreen_pm_prepare(struct radeon_device *rdev)
@@ -2148,7 +2159,7 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->accel_working = false;
+ rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 0000000..64516b9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,1356 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include "drmP.h"
+#include "radeon.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+
+struct evergreen_cs_track {
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* value we track */
+ u32 nsamples;
+ u32 cb_color_base_last[12];
+ struct radeon_bo *cb_color_bo[12];
+ u32 cb_color_bo_offset[12];
+ struct radeon_bo *cb_color_fmask_bo[8];
+ struct radeon_bo *cb_color_cmask_bo[8];
+ u32 cb_color_info[12];
+ u32 cb_color_view[12];
+ u32 cb_color_pitch_idx[12];
+ u32 cb_color_slice_idx[12];
+ u32 cb_color_dim_idx[12];
+ u32 cb_color_dim[12];
+ u32 cb_color_pitch[12];
+ u32 cb_color_slice[12];
+ u32 cb_color_cmask_slice[8];
+ u32 cb_color_fmask_slice[8];
+ u32 cb_target_mask;
+ u32 cb_shader_mask;
+ u32 vgt_strmout_config;
+ u32 vgt_strmout_buffer_config;
+ u32 db_depth_control;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_depth_size_idx;
+ u32 db_z_info;
+ u32 db_z_idx;
+ u32 db_z_read_offset;
+ u32 db_z_write_offset;
+ struct radeon_bo *db_z_read_bo;
+ struct radeon_bo *db_z_write_bo;
+ u32 db_s_info;
+ u32 db_s_idx;
+ u32 db_s_read_offset;
+ u32 db_s_write_offset;
+ struct radeon_bo *db_s_read_bo;
+ struct radeon_bo *db_s_write_bo;
+};
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ track->cb_color_fmask_bo[i] = NULL;
+ track->cb_color_cmask_bo[i] = NULL;
+ track->cb_color_cmask_slice[i] = 0;
+ track->cb_color_fmask_slice[i] = 0;
+ }
+
+ for (i = 0; i < 12; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_info[i] = 0;
+ track->cb_color_view[i] = 0;
+ track->cb_color_pitch_idx[i] = 0;
+ track->cb_color_slice_idx[i] = 0;
+ track->cb_color_dim[i] = 0;
+ track->cb_color_pitch[i] = 0;
+ track->cb_color_slice[i] = 0;
+ track->cb_color_dim[i] = 0;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+ track->db_z_info = 0xFFFFFFFF;
+ track->db_z_idx = 0xFFFFFFFF;
+ track->db_z_read_offset = 0xFFFFFFFF;
+ track->db_z_write_offset = 0xFFFFFFFF;
+ track->db_z_read_bo = NULL;
+ track->db_z_write_bo = NULL;
+ track->db_s_info = 0xFFFFFFFF;
+ track->db_s_idx = 0xFFFFFFFF;
+ track->db_s_read_offset = 0xFFFFFFFF;
+ track->db_s_write_offset = 0xFFFFFFFF;
+ track->db_s_read_bo = NULL;
+ track->db_s_write_bo = NULL;
+}
+
+static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ /* XXX fill in */
+ return 0;
+}
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+
+ /* we don't support stream out buffer yet */
+ if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
+ }
+
+ /* XXX fill in */
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining ib size, or if the packet type is unknown.
+ **/
+int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
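
For reference, a stand-alone sketch of how the packet header dword parsed above encodes type, count, and (for type-0 packets) the starting register offset. The masks and shifts are written out inline and are assumptions matching the CP_PACKET_GET_* macros used by the parser; the sample header value is purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* an illustrative type-0 header: type=0, count=2, base index 0x1c00 */
	uint32_t header = (0u << 30) | (2u << 16) | 0x1c00;

	unsigned int type  = (header >> 30) & 0x3;	/* CP_PACKET_GET_TYPE()  */
	unsigned int count = (header >> 16) & 0x3fff;	/* CP_PACKET_GET_COUNT() */
	unsigned int reg   = (header & 0xffff) << 2;	/* CP_PACKET0_GET_REG()  */

	printf("type %u, %u+1 payload dwords, first reg 0x%05x\n", type, count, reg);
	return 0;
}
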
+/**
+ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @parser: parser structure holding parsing context.
+ * @data: pointer to relocation data
+ * @offset_start: starting offset
+ * @offset_mask: offset mask (to align start offset on)
+ * @reloc: reloc information
+ *
+ * Check next packet is relocation packet3, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a relocation packet3 (NOP); returns 1
+ * if it is, 0 otherwise.
+ **/
+static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser: parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_cs_packet p3reloc, wait_reg_mem;
+ int crtc_id;
+ int r;
+ uint32_t header, h_idx, reg, wait_reg_mem_info;
+ volatile uint32_t *ib;
+
+ ib = p->ib->ptr;
+
+ /* parse the WAIT_REG_MEM */
+ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ if (r)
+ return r;
+
+ /* check it's a WAIT_REG_MEM */
+ if (wait_reg_mem.type != PACKET_TYPE3 ||
+ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+ /* bit 4 is reg (0) or mem (1) */
+ if (wait_reg_mem_info & 0x10) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ r = -EINVAL;
+ return r;
+ }
+ /* waiting for value to be equal */
+ if ((wait_reg_mem_info & 0x7) != 0x3) {
+ DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ r = -EINVAL;
+ return r;
+ }
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
+ DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
+ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ r = -EINVAL;
+ return r;
+ }
+
+ /* jump over the NOP */
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ if (r)
+ return r;
+
+ h_idx = p->idx - 2;
+ p->idx += wait_reg_mem.count + 2;
+ p->idx += p3reloc.count + 2;
+
+ header = radeon_get_ib_value(p, h_idx);
+ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+ reg = CP_PACKET0_GET_REG(header);
+ mutex_lock(&p->rdev->ddev->mode_config.mutex);
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ r = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ radeon_crtc = to_radeon_crtc(crtc);
+ crtc_id = radeon_crtc->crtc_id;
+
+ if (!crtc->enabled) {
+ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ ib[h_idx + 2] = PACKET2(0);
+ ib[h_idx + 3] = PACKET2(0);
+ ib[h_idx + 4] = PACKET2(0);
+ ib[h_idx + 5] = PACKET2(0);
+ ib[h_idx + 6] = PACKET2(0);
+ ib[h_idx + 7] = PACKET2(0);
+ ib[h_idx + 8] = PACKET2(0);
+ } else {
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
+ ib[h_idx] = header;
+ ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
+ break;
+ default:
+ DRM_ERROR("unknown crtc reloc\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+ return r;
+}
+
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ int r;
+
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ r = evergreen_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ return r;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ unsigned reg, i;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+ r = evergreen_packet0_check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * evergreen_cs_check_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against evergreen_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe, this
+ * function will test it against a list of registers needing special handling.
+ */
+static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+ u32 m, i, tmp, *ib;
+ int r;
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib->ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the output
+ * buffer until we better understand how it works and can perform
+ * security checks on it (Jerome)
+ */
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ /* get value to populate the IB, don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ break;
+ case DB_Z_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_info = radeon_get_ib_value(p, idx);
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case DB_STENCIL_INFO:
+ track->db_s_info = radeon_get_ib_value(p, idx);
+ break;
+ case DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ break;
+ case DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ break;
+ case DB_Z_READ_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_read_bo = reloc->robj;
+ break;
+ case DB_Z_WRITE_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_write_bo = reloc->robj;
+ break;
+ case DB_STENCIL_READ_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_read_bo = reloc->robj;
+ break;
+ case DB_STENCIL_WRITE_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_write_bo = reloc->robj;
+ break;
+ case VGT_STRMOUT_CONFIG:
+ track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+ break;
+ case VGT_STRMOUT_BUFFER_CONFIG:
+ track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+ break;
+ case CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ break;
+ case CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case PA_SC_AA_CONFIG:
+ tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
+ case CB_COLOR0_VIEW:
+ case CB_COLOR1_VIEW:
+ case CB_COLOR2_VIEW:
+ case CB_COLOR3_VIEW:
+ case CB_COLOR4_VIEW:
+ case CB_COLOR5_VIEW:
+ case CB_COLOR6_VIEW:
+ case CB_COLOR7_VIEW:
+ tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR8_VIEW:
+ case CB_COLOR9_VIEW:
+ case CB_COLOR10_VIEW:
+ case CB_COLOR11_VIEW:
+ tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_INFO:
+ case CB_COLOR1_INFO:
+ case CB_COLOR2_INFO:
+ case CB_COLOR3_INFO:
+ case CB_COLOR4_INFO:
+ case CB_COLOR5_INFO:
+ case CB_COLOR6_INFO:
+ case CB_COLOR7_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case CB_COLOR8_INFO:
+ case CB_COLOR9_INFO:
+ case CB_COLOR10_INFO:
+ case CB_COLOR11_INFO:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ }
+ break;
+ case CB_COLOR0_PITCH:
+ case CB_COLOR1_PITCH:
+ case CB_COLOR2_PITCH:
+ case CB_COLOR3_PITCH:
+ case CB_COLOR4_PITCH:
+ case CB_COLOR5_PITCH:
+ case CB_COLOR6_PITCH:
+ case CB_COLOR7_PITCH:
+ tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_pitch_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_PITCH:
+ case CB_COLOR9_PITCH:
+ case CB_COLOR10_PITCH:
+ case CB_COLOR11_PITCH:
+ tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_pitch_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_SLICE:
+ case CB_COLOR1_SLICE:
+ case CB_COLOR2_SLICE:
+ case CB_COLOR3_SLICE:
+ case CB_COLOR4_SLICE:
+ case CB_COLOR5_SLICE:
+ case CB_COLOR6_SLICE:
+ case CB_COLOR7_SLICE:
+ tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_SLICE:
+ case CB_COLOR9_SLICE:
+ case CB_COLOR10_SLICE:
+ case CB_COLOR11_SLICE:
+ tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_ATTRIB:
+ case CB_COLOR1_ATTRIB:
+ case CB_COLOR2_ATTRIB:
+ case CB_COLOR3_ATTRIB:
+ case CB_COLOR4_ATTRIB:
+ case CB_COLOR5_ATTRIB:
+ case CB_COLOR6_ATTRIB:
+ case CB_COLOR7_ATTRIB:
+ case CB_COLOR8_ATTRIB:
+ case CB_COLOR9_ATTRIB:
+ case CB_COLOR10_ATTRIB:
+ case CB_COLOR11_ATTRIB:
+ break;
+ case CB_COLOR0_DIM:
+ case CB_COLOR1_DIM:
+ case CB_COLOR2_DIM:
+ case CB_COLOR3_DIM:
+ case CB_COLOR4_DIM:
+ case CB_COLOR5_DIM:
+ case CB_COLOR6_DIM:
+ case CB_COLOR7_DIM:
+ tmp = (reg - CB_COLOR0_DIM) / 0x3c;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR8_DIM:
+ case CB_COLOR9_DIM:
+ case CB_COLOR10_DIM:
+ case CB_COLOR11_DIM:
+ tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
+ track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_dim_idx[tmp] = idx;
+ break;
+ case CB_COLOR0_FMASK:
+ case CB_COLOR1_FMASK:
+ case CB_COLOR2_FMASK:
+ case CB_COLOR3_FMASK:
+ case CB_COLOR4_FMASK:
+ case CB_COLOR5_FMASK:
+ case CB_COLOR6_FMASK:
+ case CB_COLOR7_FMASK:
+ tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_fmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_CMASK:
+ case CB_COLOR1_CMASK:
+ case CB_COLOR2_CMASK:
+ case CB_COLOR3_CMASK:
+ case CB_COLOR4_CMASK:
+ case CB_COLOR5_CMASK:
+ case CB_COLOR6_CMASK:
+ case CB_COLOR7_CMASK:
+ tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_cmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_FMASK_SLICE:
+ case CB_COLOR1_FMASK_SLICE:
+ case CB_COLOR2_FMASK_SLICE:
+ case CB_COLOR3_FMASK_SLICE:
+ case CB_COLOR4_FMASK_SLICE:
+ case CB_COLOR5_FMASK_SLICE:
+ case CB_COLOR6_FMASK_SLICE:
+ case CB_COLOR7_FMASK_SLICE:
+ tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+ track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_CMASK_SLICE:
+ case CB_COLOR1_CMASK_SLICE:
+ case CB_COLOR2_CMASK_SLICE:
+ case CB_COLOR3_CMASK_SLICE:
+ case CB_COLOR4_CMASK_SLICE:
+ case CB_COLOR5_CMASK_SLICE:
+ case CB_COLOR6_CMASK_SLICE:
+ case CB_COLOR7_CMASK_SLICE:
+ tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+ track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR8_BASE:
+ case CB_COLOR9_BASE:
+ case CB_COLOR10_BASE:
+ case CB_COLOR11_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case CB_IMMED0_BASE:
+ case CB_IMMED1_BASE:
+ case CB_IMMED2_BASE:
+ case CB_IMMED3_BASE:
+ case CB_IMMED4_BASE:
+ case CB_IMMED5_BASE:
+ case CB_IMMED6_BASE:
+ case CB_IMMED7_BASE:
+ case CB_IMMED8_BASE:
+ case CB_IMMED9_BASE:
+ case CB_IMMED10_BASE:
+ case CB_IMMED11_BASE:
+ case DB_HTILE_DATA_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ case SQ_PGM_START_HS:
+ case SQ_PGM_START_LS:
+ case GDS_ADDR_BASE:
+ case SQ_CONST_MEM_BASE:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
+ case SQ_ALU_CONST_CACHE_HS_0:
+ case SQ_ALU_CONST_CACHE_HS_1:
+ case SQ_ALU_CONST_CACHE_HS_2:
+ case SQ_ALU_CONST_CACHE_HS_3:
+ case SQ_ALU_CONST_CACHE_HS_4:
+ case SQ_ALU_CONST_CACHE_HS_5:
+ case SQ_ALU_CONST_CACHE_HS_6:
+ case SQ_ALU_CONST_CACHE_HS_7:
+ case SQ_ALU_CONST_CACHE_HS_8:
+ case SQ_ALU_CONST_CACHE_HS_9:
+ case SQ_ALU_CONST_CACHE_HS_10:
+ case SQ_ALU_CONST_CACHE_HS_11:
+ case SQ_ALU_CONST_CACHE_HS_12:
+ case SQ_ALU_CONST_CACHE_HS_13:
+ case SQ_ALU_CONST_CACHE_HS_14:
+ case SQ_ALU_CONST_CACHE_HS_15:
+ case SQ_ALU_CONST_CACHE_LS_0:
+ case SQ_ALU_CONST_CACHE_LS_1:
+ case SQ_ALU_CONST_CACHE_LS_2:
+ case SQ_ALU_CONST_CACHE_LS_3:
+ case SQ_ALU_CONST_CACHE_LS_4:
+ case SQ_ALU_CONST_CACHE_LS_5:
+ case SQ_ALU_CONST_CACHE_LS_6:
+ case SQ_ALU_CONST_CACHE_LS_7:
+ case SQ_ALU_CONST_CACHE_LS_8:
+ case SQ_ALU_CONST_CACHE_LS_9:
+ case SQ_ALU_CONST_CACHE_LS_10:
+ case SQ_ALU_CONST_CACHE_LS_11:
+ case SQ_ALU_CONST_CACHE_LS_12:
+ case SQ_ALU_CONST_CACHE_LS_13:
+ case SQ_ALU_CONST_CACHE_LS_14:
+ case SQ_ALU_CONST_CACHE_LS_15:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * evergreen_check_texture_resource() - check if the texture resource is valid
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
+{
+ /* XXX fill in */
+ return 0;
+}
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct evergreen_cs_track *track;
+ volatile u32 *ib;
+ unsigned idx;
+ unsigned i;
+ unsigned start_reg, end_reg, reg;
+ int r;
+ u32 idx_value;
+
+ track = (struct evergreen_cs_track *)p->track;
+ ib = p->ib->ptr;
+ idx = pkt->idx + 1;
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch (pkt->opcode) {
+ case PACKET3_CONTEXT_CONTROL:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad CONTEXT_CONTROL\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_CLEAR_STATE:
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_BASE:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_2:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_AUTO:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_IMMD:
+ if (pkt->count < 2) {
+ DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_WAIT_REG_MEM:
+ if (pkt->count != 5) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ /* bit 4 is reg (0) or mem (1) */
+ if (idx_value & 0x10) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ }
+ break;
+ case PACKET3_SURFACE_SYNC:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ /* 0xffffffff/0x0 is flush all cache flag */
+ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+ radeon_get_ib_value(p, idx + 2) != 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
+ case PACKET3_EVENT_WRITE:
+ if (pkt->count != 2 && pkt->count != 0) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ }
+ break;
+ case PACKET3_EVENT_WRITE_EOP:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case PACKET3_EVENT_WRITE_EOS:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_CONTEXT_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_RESOURCE:
+ if (pkt->count % 8) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+ (start_reg >= PACKET3_SET_RESOURCE_END) ||
+ (end_reg >= PACKET3_SET_RESOURCE_END)) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
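+ /* each resource takes 8 dwords; dword 7 carries the SQ constant type decoded below */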
+ for (i = 0; i < (pkt->count / 8); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset;
+
+ switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+ case SQ_TEX_VTX_VALID_TEXTURE:
+ /* tex base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ texture = reloc->robj;
+ /* tex mip base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = evergreen_check_texture_resource(p, idx+1+(i*8),
+ texture, mipmap);
+ if (r)
+ return r;
+ break;
+ case SQ_TEX_VTX_VALID_BUFFER:
+ /* vtx base */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
+ }
+ ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+ ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ break;
+ case SQ_TEX_VTX_INVALID_TEXTURE:
+ case SQ_TEX_VTX_INVALID_BUFFER:
+ default:
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case PACKET3_SET_ALU_CONST:
+ /* XXX fix me ALU const buffers only */
+ break;
+ case PACKET3_SET_BOOL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+ (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+ DRM_ERROR("bad SET_BOOL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_LOOP_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+ (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+ DRM_ERROR("bad SET_LOOP_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CTL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+ (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+ (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+ DRM_ERROR("bad SET_CTL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_SAMPLER:
+ if (pkt->count % 3) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+ (start_reg >= PACKET3_SET_SAMPLER_END) ||
+ (end_reg >= PACKET3_SET_SAMPLER_END)) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct evergreen_cs_track *track;
+ int r;
+
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ evergreen_cs_track_init(track);
+ track->npipes = p->rdev->config.evergreen.tiling_npipes;
+ track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
+ track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ p->track = track;
+ }
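+ /* consume the IB one packet at a time until the whole chunk has been parsed */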
+ do {
+ r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ r = evergreen_cs_parse_packet0(p, &pkt);
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ r = evergreen_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
+ return -EINVAL;
+ }
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib->length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ mdelay(1);
+ }
+#endif
+ kfree(p->track);
+ p->track = NULL;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index af86af8..e028c1c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -151,6 +151,9 @@
#define EVERGREEN_DATA_FORMAT 0x6b00
# define EVERGREEN_INTERLEAVE_EN (1 << 0)
#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+#define EVERGREEN_VLINE_START_END 0x6b08
+#define EVERGREEN_VLINE_STATUS 0x6bb8
+# define EVERGREEN_VLINE_STAT (1 << 12)
#define EVERGREEN_VIEWPORT_START 0x6d70
#define EVERGREEN_VIEWPORT_SIZE 0x6d74
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 93e9e17ad..79683f6 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -218,6 +218,8 @@
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_AA_CONFIG 0x28C04
+#define MSAA_NUM_SAMPLES_SHIFT 0
+#define MSAA_NUM_SAMPLES_MASK 0x3
#define PA_SC_CLIPRECT_RULE 0x2820C
#define PA_SC_EDGERULE 0x28230
#define PA_SC_FIFO_SIZE 0x8BCC
@@ -553,4 +555,466 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/*
+ * PM4
+ */
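+/*
+ * PM4 packet headers: bits [31:30] hold the packet type, [29:16] the count
+ * field, [15:8] the type-3 opcode and [15:0] the type-0 register dword offset.
+ */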
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_VC_ACTION_ENA (1 << 24)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SMX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+#define PACKET3_RB_OFFSET 0x4B
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
+#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
+#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
+#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
+#define SQ_TEX_VTX_INVALID_BUFFER 0x1
+#define SQ_TEX_VTX_VALID_TEXTURE 0x2
+#define SQ_TEX_VTX_VALID_BUFFER 0x3
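+/* bits [31:30] of resource dword 7 give the constant type checked by the CS parser */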
+
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define VGT_TF_RING_SIZE 0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE 0x28900
+#define SQ_GSVS_RING_ITEMSIZE 0x28904
+#define SQ_ESTMP_RING_ITEMSIZE 0x28908
+#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE 0x28910
+#define SQ_PSTMP_RING_ITEMSIZE 0x28914
+#define SQ_LSTMP_RING_ITEMSIZE 0x28830
+#define SQ_HSTMP_RING_ITEMSIZE 0x28834
+
+#define SQ_GS_VERT_ITEMSIZE 0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1 0x28920
+#define SQ_GS_VERT_ITEMSIZE_2 0x28924
+#define SQ_GS_VERT_ITEMSIZE_3 0x28928
+#define SQ_GSVS_RING_OFFSET_1 0x2892c
+#define SQ_GSVS_RING_OFFSET_2 0x28930
+#define SQ_GSVS_RING_OFFSET_3 0x28934
+
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
+
+#define DB_DEPTH_CONTROL 0x28800
+#define DB_DEPTH_VIEW 0x28008
+#define DB_HTILE_DATA_BASE 0x28014
+#define DB_Z_INFO 0x28040
+# define Z_ARRAY_MODE(x) ((x) << 4)
+#define DB_STENCIL_INFO 0x28044
+#define DB_Z_READ_BASE 0x28048
+#define DB_STENCIL_READ_BASE 0x2804c
+#define DB_Z_WRITE_BASE 0x28050
+#define DB_STENCIL_WRITE_BASE 0x28054
+#define DB_DEPTH_SIZE 0x28058
+
+#define SQ_PGM_START_PS 0x28840
+#define SQ_PGM_START_VS 0x2885c
+#define SQ_PGM_START_GS 0x28874
+#define SQ_PGM_START_ES 0x2888c
+#define SQ_PGM_START_FS 0x288a4
+#define SQ_PGM_START_HS 0x288b8
+#define SQ_PGM_START_LS 0x288d0
+
+#define VGT_STRMOUT_CONFIG 0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
+
+#define CB_TARGET_MASK 0x28238
+#define CB_SHADER_MASK 0x2823c
+
+#define GDS_ADDR_BASE 0x28720
+
+#define CB_IMMED0_BASE 0x28b9c
+#define CB_IMMED1_BASE 0x28ba0
+#define CB_IMMED2_BASE 0x28ba4
+#define CB_IMMED3_BASE 0x28ba8
+#define CB_IMMED4_BASE 0x28bac
+#define CB_IMMED5_BASE 0x28bb0
+#define CB_IMMED6_BASE 0x28bb4
+#define CB_IMMED7_BASE 0x28bb8
+#define CB_IMMED8_BASE 0x28bbc
+#define CB_IMMED9_BASE 0x28bc0
+#define CB_IMMED10_BASE 0x28bc4
+#define CB_IMMED11_BASE 0x28bc8
+
+/* all 12 CB blocks have these regs */
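+/* CB0-7 blocks repeat at a 0x3c byte stride, CB8-11 at a 0x1c stride (see the divides in evergreen_cs.c) */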
+#define CB_COLOR0_BASE 0x28c60
+#define CB_COLOR0_PITCH 0x28c64
+#define CB_COLOR0_SLICE 0x28c68
+#define CB_COLOR0_VIEW 0x28c6c
+#define CB_COLOR0_INFO 0x28c70
+# define CB_ARRAY_MODE(x) ((x) << 8)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+#define CB_COLOR0_ATTRIB 0x28c74
+#define CB_COLOR0_DIM 0x28c78
+/* only CB0-7 blocks have these regs */
+#define CB_COLOR0_CMASK 0x28c7c
+#define CB_COLOR0_CMASK_SLICE 0x28c80
+#define CB_COLOR0_FMASK 0x28c84
+#define CB_COLOR0_FMASK_SLICE 0x28c88
+#define CB_COLOR0_CLEAR_WORD0 0x28c8c
+#define CB_COLOR0_CLEAR_WORD1 0x28c90
+#define CB_COLOR0_CLEAR_WORD2 0x28c94
+#define CB_COLOR0_CLEAR_WORD3 0x28c98
+
+#define CB_COLOR1_BASE 0x28c9c
+#define CB_COLOR2_BASE 0x28cd8
+#define CB_COLOR3_BASE 0x28d14
+#define CB_COLOR4_BASE 0x28d50
+#define CB_COLOR5_BASE 0x28d8c
+#define CB_COLOR6_BASE 0x28dc8
+#define CB_COLOR7_BASE 0x28e04
+#define CB_COLOR8_BASE 0x28e40
+#define CB_COLOR9_BASE 0x28e5c
+#define CB_COLOR10_BASE 0x28e78
+#define CB_COLOR11_BASE 0x28e94
+
+#define CB_COLOR1_PITCH 0x28ca0
+#define CB_COLOR2_PITCH 0x28cdc
+#define CB_COLOR3_PITCH 0x28d18
+#define CB_COLOR4_PITCH 0x28d54
+#define CB_COLOR5_PITCH 0x28d90
+#define CB_COLOR6_PITCH 0x28dcc
+#define CB_COLOR7_PITCH 0x28e08
+#define CB_COLOR8_PITCH 0x28e44
+#define CB_COLOR9_PITCH 0x28e60
+#define CB_COLOR10_PITCH 0x28e7c
+#define CB_COLOR11_PITCH 0x28e98
+
+#define CB_COLOR1_SLICE 0x28ca4
+#define CB_COLOR2_SLICE 0x28ce0
+#define CB_COLOR3_SLICE 0x28d1c
+#define CB_COLOR4_SLICE 0x28d58
+#define CB_COLOR5_SLICE 0x28d94
+#define CB_COLOR6_SLICE 0x28dd0
+#define CB_COLOR7_SLICE 0x28e0c
+#define CB_COLOR8_SLICE 0x28e48
+#define CB_COLOR9_SLICE 0x28e64
+#define CB_COLOR10_SLICE 0x28e80
+#define CB_COLOR11_SLICE 0x28e9c
+
+#define CB_COLOR1_VIEW 0x28ca8
+#define CB_COLOR2_VIEW 0x28ce4
+#define CB_COLOR3_VIEW 0x28d20
+#define CB_COLOR4_VIEW 0x28d5c
+#define CB_COLOR5_VIEW 0x28d98
+#define CB_COLOR6_VIEW 0x28dd4
+#define CB_COLOR7_VIEW 0x28e10
+#define CB_COLOR8_VIEW 0x28e4c
+#define CB_COLOR9_VIEW 0x28e68
+#define CB_COLOR10_VIEW 0x28e84
+#define CB_COLOR11_VIEW 0x28ea0
+
+#define CB_COLOR1_INFO 0x28cac
+#define CB_COLOR2_INFO 0x28ce8
+#define CB_COLOR3_INFO 0x28d24
+#define CB_COLOR4_INFO 0x28d60
+#define CB_COLOR5_INFO 0x28d9c
+#define CB_COLOR6_INFO 0x28dd8
+#define CB_COLOR7_INFO 0x28e14
+#define CB_COLOR8_INFO 0x28e50
+#define CB_COLOR9_INFO 0x28e6c
+#define CB_COLOR10_INFO 0x28e88
+#define CB_COLOR11_INFO 0x28ea4
+
+#define CB_COLOR1_ATTRIB 0x28cb0
+#define CB_COLOR2_ATTRIB 0x28cec
+#define CB_COLOR3_ATTRIB 0x28d28
+#define CB_COLOR4_ATTRIB 0x28d64
+#define CB_COLOR5_ATTRIB 0x28da0
+#define CB_COLOR6_ATTRIB 0x28ddc
+#define CB_COLOR7_ATTRIB 0x28e18
+#define CB_COLOR8_ATTRIB 0x28e54
+#define CB_COLOR9_ATTRIB 0x28e70
+#define CB_COLOR10_ATTRIB 0x28e8c
+#define CB_COLOR11_ATTRIB 0x28ea8
+
+#define CB_COLOR1_DIM 0x28cb4
+#define CB_COLOR2_DIM 0x28cf0
+#define CB_COLOR3_DIM 0x28d2c
+#define CB_COLOR4_DIM 0x28d68
+#define CB_COLOR5_DIM 0x28da4
+#define CB_COLOR6_DIM 0x28de0
+#define CB_COLOR7_DIM 0x28e1c
+#define CB_COLOR8_DIM 0x28e58
+#define CB_COLOR9_DIM 0x28e74
+#define CB_COLOR10_DIM 0x28e90
+#define CB_COLOR11_DIM 0x28eac
+
+#define CB_COLOR1_CMASK 0x28cb8
+#define CB_COLOR2_CMASK 0x28cf4
+#define CB_COLOR3_CMASK 0x28d30
+#define CB_COLOR4_CMASK 0x28d6c
+#define CB_COLOR5_CMASK 0x28da8
+#define CB_COLOR6_CMASK 0x28de4
+#define CB_COLOR7_CMASK 0x28e20
+
+#define CB_COLOR1_CMASK_SLICE 0x28cbc
+#define CB_COLOR2_CMASK_SLICE 0x28cf8
+#define CB_COLOR3_CMASK_SLICE 0x28d34
+#define CB_COLOR4_CMASK_SLICE 0x28d70
+#define CB_COLOR5_CMASK_SLICE 0x28dac
+#define CB_COLOR6_CMASK_SLICE 0x28de8
+#define CB_COLOR7_CMASK_SLICE 0x28e24
+
+#define CB_COLOR1_FMASK 0x28cc0
+#define CB_COLOR2_FMASK 0x28cfc
+#define CB_COLOR3_FMASK 0x28d38
+#define CB_COLOR4_FMASK 0x28d74
+#define CB_COLOR5_FMASK 0x28db0
+#define CB_COLOR6_FMASK 0x28dec
+#define CB_COLOR7_FMASK 0x28e28
+
+#define CB_COLOR1_FMASK_SLICE 0x28cc4
+#define CB_COLOR2_FMASK_SLICE 0x28d00
+#define CB_COLOR3_FMASK_SLICE 0x28d3c
+#define CB_COLOR4_FMASK_SLICE 0x28d78
+#define CB_COLOR5_FMASK_SLICE 0x28db4
+#define CB_COLOR6_FMASK_SLICE 0x28df0
+#define CB_COLOR7_FMASK_SLICE 0x28e2c
+
+#define CB_COLOR1_CLEAR_WORD0 0x28cc8
+#define CB_COLOR2_CLEAR_WORD0 0x28d04
+#define CB_COLOR3_CLEAR_WORD0 0x28d40
+#define CB_COLOR4_CLEAR_WORD0 0x28d7c
+#define CB_COLOR5_CLEAR_WORD0 0x28db8
+#define CB_COLOR6_CLEAR_WORD0 0x28df4
+#define CB_COLOR7_CLEAR_WORD0 0x28e30
+
+#define CB_COLOR1_CLEAR_WORD1 0x28ccc
+#define CB_COLOR2_CLEAR_WORD1 0x28d08
+#define CB_COLOR3_CLEAR_WORD1 0x28d44
+#define CB_COLOR4_CLEAR_WORD1 0x28d80
+#define CB_COLOR5_CLEAR_WORD1 0x28dbc
+#define CB_COLOR6_CLEAR_WORD1 0x28df8
+#define CB_COLOR7_CLEAR_WORD1 0x28e34
+
+#define CB_COLOR1_CLEAR_WORD2 0x28cd0
+#define CB_COLOR2_CLEAR_WORD2 0x28d0c
+#define CB_COLOR3_CLEAR_WORD2 0x28d48
+#define CB_COLOR4_CLEAR_WORD2 0x28d84
+#define CB_COLOR5_CLEAR_WORD2 0x28dc0
+#define CB_COLOR6_CLEAR_WORD2 0x28dfc
+#define CB_COLOR7_CLEAR_WORD2 0x28e38
+
+#define CB_COLOR1_CLEAR_WORD3 0x28cd4
+#define CB_COLOR2_CLEAR_WORD3 0x28d10
+#define CB_COLOR3_CLEAR_WORD3 0x28d4c
+#define CB_COLOR4_CLEAR_WORD3 0x28d88
+#define CB_COLOR5_CLEAR_WORD3 0x28dc4
+#define CB_COLOR6_CLEAR_WORD3 0x28e00
+#define CB_COLOR7_CLEAR_WORD3 0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0 0x30000
+#define SQ_TEX_RESOURCE_WORD1_0 0x30004
+# define TEX_ARRAY_MODE(x) ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0 0x30008
+#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
+#define SQ_TEX_RESOURCE_WORD4_0 0x30010
+#define SQ_TEX_RESOURCE_WORD5_0 0x30014
+#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+
+
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cc004b0..cf89aa2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -162,6 +162,11 @@ void r100_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -172,6 +177,11 @@ void r100_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4415a5e..e6c8914 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -45,9 +45,14 @@ void r420_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -58,6 +63,11 @@ void r420_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 44e96a2..0e91871 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -291,6 +291,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
@@ -301,6 +306,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
@@ -317,6 +327,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
@@ -327,6 +342,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
@@ -343,6 +363,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
@@ -353,6 +378,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
@@ -375,6 +405,11 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -385,6 +420,11 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@ -401,7 +441,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
@@ -411,7 +456,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
@@ -430,14 +480,30 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
} else {
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ }
+ /* mid sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
}
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
@@ -453,14 +519,30 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
} else {
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ }
+ /* mid mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
}
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
@@ -475,7 +557,18 @@ void r600_pm_init_profile(struct radeon_device *rdev)
void r600_pm_misc(struct radeon_device *rdev)
{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
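+
+ /* only switch VDDC when it is software controlled and differs from the current value */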
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
}
bool r600_gui_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 669feb6..8e1d44c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -176,6 +176,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
/*
* Fences.
@@ -647,15 +648,18 @@ enum radeon_pm_profile_type {
PM_PROFILE_DEFAULT,
PM_PROFILE_AUTO,
PM_PROFILE_LOW,
+ PM_PROFILE_MID,
PM_PROFILE_HIGH,
};
#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX 1
-#define PM_PROFILE_HIGH_SH_IDX 2
-#define PM_PROFILE_LOW_MH_IDX 3
-#define PM_PROFILE_HIGH_MH_IDX 4
-#define PM_PROFILE_MAX 5
+#define PM_PROFILE_MID_SH_IDX 2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX 4
+#define PM_PROFILE_MID_MH_IDX 5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX 7
struct radeon_pm_profile {
int dpms_off_ps_idx;
@@ -744,6 +748,7 @@ struct radeon_pm {
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
+ u32 current_vddc;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e57df08..87f7e2c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = {
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = NULL,
- .cs_parse = NULL,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
.copy_blit = NULL,
.copy_dma = NULL,
.copy = NULL,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5c40a3d..c0bbaa6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 24ea683..99bd8a9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1538,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1605,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1679,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1755,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].misc2 = 0;
}
} else {
+ int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ uint8_t fw_frev, fw_crev;
+ uint16_t fw_data_offset, vddc = 0;
+ union firmware_info *firmware_info;
+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+
+ if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
+ &fw_frev, &fw_crev, &fw_data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ fw_data_offset);
+ vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ }
+
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
- ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
if (controller->ucType > 0) {
if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
(controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
@@ -1817,10 +1833,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
+ /* voltage works differently on IGPs */
mode_index++;
} else if (ASIC_IS_DCE4(rdev)) {
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
@@ -1904,6 +1917,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
+ }
}
state_index++;
}
@@ -1943,6 +1966,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
@@ -1998,6 +2022,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+union set_voltage {
+ struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+ struct _SET_VOLTAGE_PARAMETERS v1;
+ struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev, volt_index = level;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (crev) {
+ case 1:
+ args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+ args.v1.ucVoltageIndex = volt_index;
+ break;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+ args.v2.usVoltageLevel = cpu_to_le16(level);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+
+
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
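The SetVoltage helper above is consumed by the per-ASIC pm_misc hooks (see the rs600.c and rv770.c hunks further down). As a minimal sketch of that calling pattern, grounded in those hunks and assuming the power-state tables were filled in as above; the function name example_apply_vddc is illustrative only and not part of the patch:

	/* Sketch only: apply the requested SW-controlled VDDC level,
	 * skipping the ATOM call when the voltage is already current. */
	static void example_apply_vddc(struct radeon_device *rdev)
	{
		int ps = rdev->pm.requested_power_state_index;
		int cm = rdev->pm.requested_clock_mode_index;
		struct radeon_voltage *v =
			&rdev->pm.power_state[ps].clock_info[cm].voltage;

		if ((v->type == VOLTAGE_SW) && v->voltage &&
		    (v->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, v->voltage);
			rdev->pm.current_vddc = v->voltage;
		}
	}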
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b5e10d..1bee2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2026,6 +2026,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
+ ddc_i2c.valid = false;
break;
}
@@ -2339,6 +2340,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
hpd.hpd = RADEON_HPD_NONE;
+ ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev,
@@ -2454,7 +2456,12 @@ default_mode:
rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if ((state_index > 0) &&
+ (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+ rdev->pm.power_state[state_index].clock_info[0].voltage =
+ rdev->pm.power_state[0].clock_info[0].voltage;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
rdev->pm.power_state[state_index].pcie_lanes = 16;
rdev->pm.power_state[state_index].flags = 0;
rdev->pm.default_power_state_index = state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index fdc3fdf7..f10faed 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
rdev->powered_down = false;
radeon_resume_kms(dev);
+ drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_INFO "radeon: switched off\n");
+ drm_kms_helper_poll_disable(dev);
radeon_suspend_kms(dev, pmm);
/* don't suspend or resume card normally */
rdev->powered_down = true;
@@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
+ struct drm_connector *connector;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rdev->powered_down)
return 0;
+
+ /* turn off display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1006549..8154cdf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -284,8 +284,7 @@ static const char *connector_names[15] = {
"eDP",
};
-static const char *hpd_names[7] = {
- "NONE",
+static const char *hpd_names[6] = {
"HPD1",
"HPD2",
"HPD3",
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 902d173..e166fe4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -45,9 +45,10 @@
* - 2.2.0 - add r6xx/r7xx const buffer support
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
* - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 4
+#define KMS_DRIVER_MINOR 5
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index e192acf..dc1634b 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -363,6 +363,7 @@ int radeon_fbdev_init(struct radeon_device *rdev)
{
struct radeon_fbdev *rfbdev;
int bpp_sel = 32;
+ int ret;
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
@@ -376,9 +377,14 @@ int radeon_fbdev_init(struct radeon_device *rdev)
rdev->mode_info.rfbdev = rfbdev;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- rdev->num_crtc,
- RADEONFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ if (ret) {
+ kfree(rfbdev);
+ return ret;
+ }
+
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0406835..6a70c0d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -118,7 +118,11 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
value = rdev->num_z_pipes;
break;
case RADEON_INFO_ACCEL_WORKING:
- value = rdev->accel_working;
+ /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+ value = false;
+ else
+ value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
for (i = 0, found = 0; i < rdev->num_crtc; i++) {
@@ -134,6 +138,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_ACCEL_WORKING2:
+ value = rdev->accel_working;
+ break;
default:
DRM_DEBUG("Invalid request %d\n", info->request);
return -EINVAL;
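For completeness, a userspace consumer would reach the new RADEON_INFO_ACCEL_WORKING2 request through the existing DRM_RADEON_INFO ioctl. The following is a hedged sketch using libdrm's drmCommandWriteRead(); the drm_radeon_info field layout and header paths should be checked against the installed radeon_drm.h, and example_accel_working2 is an illustrative name:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>            /* header paths vary with the libdrm install */
	#include <drm/radeon_drm.h>

	/* Sketch: returns 1 if the kernel reports working acceleration
	 * via ACCEL_WORKING2, 0 otherwise (or on ioctl failure). */
	static int example_accel_working2(int fd)
	{
		struct drm_radeon_info info;
		uint32_t value = 0;

		memset(&info, 0, sizeof(info));
		info.request = RADEON_INFO_ACCEL_WORKING2;
		info.value = (uintptr_t)&value;  /* kernel writes the result through this pointer */

		if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) != 0)
			return 0;
		return value ? 1 : 0;
	}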
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5a13b3e..5b07b88 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1168,6 +1168,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
bool color = true;
+ struct drm_crtc *crtc;
+
+ /* find out if crtc2 is in use or if this encoder is using it */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+ if (encoder->crtc != crtc) {
+ return connector_status_disconnected;
+ }
+ }
+ }
if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a8d162c..63f679a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -33,6 +33,14 @@
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
+static const char *radeon_pm_state_type_name[5] = {
+ "Default",
+ "Powersave",
+ "Battery",
+ "Balanced",
+ "Performance",
+};
+
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
@@ -84,9 +92,9 @@ static void radeon_pm_update_profile(struct radeon_device *rdev)
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
if (rdev->pm.active_crtc_count > 1)
- rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
else
- rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
}
break;
case PM_PROFILE_LOW:
@@ -95,6 +103,12 @@ static void radeon_pm_update_profile(struct radeon_device *rdev)
else
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
+ case PM_PROFILE_MID:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+ break;
case PM_PROFILE_HIGH:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
@@ -127,15 +141,6 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
-
- if (rdev->gart.table.vram.robj)
- ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
-
- if (rdev->stollen_vga_memory)
- ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
-
- if (rdev->r600_blit.shader_obj)
- ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}
static void radeon_sync_with_vblank(struct radeon_device *rdev)
@@ -151,6 +156,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev)
static void radeon_set_power_state(struct radeon_device *rdev)
{
u32 sclk, mclk;
+ bool misc_after = false;
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
@@ -167,55 +173,47 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (mclk > rdev->clock.default_mclk)
mclk = rdev->clock.default_mclk;
- /* voltage, pcie lanes, etc.*/
- radeon_pm_misc(rdev);
+ /* upvolt before raising clocks, downvolt after lowering clocks */
+ if (sclk < rdev->pm.current_sclk)
+ misc_after = true;
- if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- radeon_sync_with_vblank(rdev);
+ radeon_sync_with_vblank(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (!radeon_pm_in_vbl(rdev))
return;
+ }
- radeon_pm_prepare(rdev);
- /* set engine clock */
- if (sclk != rdev->pm.current_sclk) {
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_engine_clock(rdev, sclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
- rdev->pm.current_sclk = sclk;
- DRM_DEBUG("Setting: e: %d\n", sclk);
- }
+ radeon_pm_prepare(rdev);
- /* set memory clock */
- if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_memory_clock(rdev, mclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
- rdev->pm.current_mclk = mclk;
- DRM_DEBUG("Setting: m: %d\n", mclk);
- }
- radeon_pm_finish(rdev);
- } else {
- /* set engine clock */
- if (sclk != rdev->pm.current_sclk) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_prepare(rdev);
- radeon_set_engine_clock(rdev, sclk);
- radeon_pm_finish(rdev);
- rdev->pm.current_sclk = sclk;
- DRM_DEBUG("Setting: e: %d\n", sclk);
- }
- /* set memory clock */
- if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_prepare(rdev);
- radeon_set_memory_clock(rdev, mclk);
- radeon_pm_finish(rdev);
- rdev->pm.current_mclk = mclk;
- DRM_DEBUG("Setting: m: %d\n", mclk);
- }
+ if (!misc_after)
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
}
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+
+ if (misc_after)
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ radeon_pm_finish(rdev);
+
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else
@@ -288,6 +286,42 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_unlock(&rdev->ddev->struct_mutex);
}
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+ int i, j;
+ struct radeon_power_state *power_state;
+ struct radeon_pm_clock_info *clock_info;
+
+ DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ power_state = &rdev->pm.power_state[i];
+ DRM_DEBUG("State %d: %s\n", i,
+ radeon_pm_state_type_name[power_state->type]);
+ if (i == rdev->pm.default_power_state_index)
+ DRM_DEBUG("\tDefault");
+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+ DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+ if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ DRM_DEBUG("\tSingle display only\n");
+ DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+ for (j = 0; j < power_state->num_clock_modes; j++) {
+ clock_info = &(power_state->clock_info[j]);
+ if (rdev->flags & RADEON_IS_IGP)
+ DRM_DEBUG("\t\t%d e: %d%s\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ else
+ DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->mclk * 10,
+ clock_info->voltage.voltage,
+ clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+ }
+ }
+}
+
static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -318,6 +352,8 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
rdev->pm.profile = PM_PROFILE_AUTO;
else if (strncmp("low", buf, strlen("low")) == 0)
rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("mid", buf, strlen("mid")) == 0)
+ rdev->pm.profile = PM_PROFILE_MID;
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
@@ -384,15 +420,19 @@ void radeon_pm_suspend(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- rdev->pm.current_power_state_index = -1;
- rdev->pm.current_clock_mode_index = -1;
- rdev->pm.current_sclk = 0;
- rdev->pm.current_mclk = 0;
mutex_unlock(&rdev->pm.mutex);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
+ /* asic init will reset the default power state */
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
}
@@ -401,32 +441,24 @@ int radeon_pm_init(struct radeon_device *rdev)
int ret;
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
- rdev->pm.current_sclk = 0;
- rdev->pm.current_mclk = 0;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
+ radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
- rdev->pm.current_power_state_index = -1;
- rdev->pm.current_clock_mode_index = -1;
}
if (rdev->pm.num_power_states > 1) {
- if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.profile = PM_PROFILE_DEFAULT;
- radeon_pm_update_profile(rdev);
- radeon_pm_set_clocks(rdev);
- mutex_unlock(&rdev->pm.mutex);
- }
-
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
if (ret)
@@ -712,6 +744,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->pm.current_vddc)
+ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
if (rdev->asic->get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 0000000..b5c757f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,611 @@
+evergreen 0x9400
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x0002805C DB_DEPTH_SLICE
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x0002885C SQ_PGM_RESOURCES_VS
+0x00028860 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D0 SQ_PGM_RESOURCES_LS
+0x000288D4 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028ABC DB_HTILE_SURFACE
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 79887ca..7bb4c3e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev)
if (voltage->delay)
udelay(voltage->delay);
}
- }
+ } else if (voltage->type == VOLTAGE_VDDC)
+ radeon_atom_set_voltage(rdev, voltage->vddc_id);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 253f24a..cec536c 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -44,7 +44,18 @@ void rv770_fini(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev)
{
-
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
}
/*
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c..ef91069 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
/**
* Limits for the pool. They are handled without locks because only place where
* they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
*/
struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
- printk(KERN_ERR "[ttm] Setting allocation size to %lu "
- "is not allowed. Recomended size is "
- "%lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
- printk(KERN_WARNING "[ttm] Setting allocation size to "
- "larger than %lu is not recomended.\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
- printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate memory for pool free operation.\n");
return 0;
}
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
}
/**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of page held.
*/
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
- printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
- cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to uc!\n",
+ cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
- printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
- cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to wc!\n",
+ cpages);
break;
default:
break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
struct page **failed_pages, unsigned cpages)
{
unsigned i;
- /* Failed pages has to be reed */
+ /* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
__free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate table for new pages.");
return -ENOMEM;
}
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
- printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
- r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+ printk(KERN_ERR TTM_PFX
+ "Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
- gfp_flags |= __GFP_HIGHMEM;
+ gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
- printk(KERN_ERR "[ttm] unable to allocate page.");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate page.");
return -ENOMEM;
}
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
- printk(KERN_ERR "[ttm] Failed to allocate extra pages "
- "for large request.");
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate extra pages "
+ "for large request.");
ttm_put_pages(pages, 0, flags, cstate);
return r;
}
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
return 0;
- printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
return;
- printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
ttm_pool_mm_shrink_fini(&_manager);
for (i = 0; i < NUM_POOLS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb68..4505e17 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o
+ vmwgfx_overlay.o vmwgfx_fence.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c081..b793c8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -88,6 +88,9 @@
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
+ struct drm_vmw_update_layout_arg)
/**
@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
- DRM_AUTH | DRM_UNLOCKED)
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
static struct pci_device_id vmw_pci_id_list[] = {
@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err3;
}
+ /* Need mmio memory to check for fifo pitchlock cap. */
+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+ !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+ !vmw_fifo_have_pitchlock(dev_priv)) {
+ ret = -ENOSYS;
+ DRM_ERROR("Hardware has no pitchlock\n");
+ goto out_err4;
+ }
+
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);
@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
unregister_pm_notifier(&dev_priv->pm_nb);
vmw_fb_close(dev_priv);
@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev,
{
struct vmw_master *vmaster;
- DRM_INFO("Master create.\n");
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (unlikely(vmaster == NULL))
return -ENOMEM;
@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev,
{
struct vmw_master *vmaster = vmw_master(master);
- DRM_INFO("Master destroy.\n");
master->driver_priv = NULL;
kfree(vmaster);
}
@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
- DRM_INFO("Master set.\n");
-
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- DRM_INFO("Master drop.\n");
-
/**
* Make sure the master doesn't disappear while we have
* it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc93..eaad520 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,12 +41,13 @@
#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 2
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_DISPLAYS 16
struct vmw_fpriv {
struct drm_master *locked_master;
@@ -102,6 +103,13 @@ struct vmw_surface {
struct vmw_cursor_snooper snooper;
};
+struct vmw_fence_queue {
+ struct list_head head;
+ struct timespec lag;
+ struct timespec lag_time;
+ spinlock_t lock;
+};
+
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
@@ -115,6 +123,7 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
+ struct vmw_fence_queue fence_queue;
};
struct vmw_relocation {
@@ -144,6 +153,14 @@ struct vmw_master {
struct ttm_lock lock;
};
+struct vmw_vga_topology_state {
+ uint32_t width;
+ uint32_t height;
+ uint32_t primary;
+ uint32_t pos_x;
+ uint32_t pos_y;
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -171,14 +188,19 @@ struct vmw_private {
* VGA registers.
*/
+ struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
uint32_t vga_depth;
uint32_t vga_bpp;
uint32_t vga_pseudo;
uint32_t vga_red_mask;
- uint32_t vga_blue_mask;
uint32_t vga_green_mask;
+ uint32_t vga_blue_mask;
+ uint32_t vga_bpl;
+ uint32_t vga_pitchlock;
+
+ uint32_t num_displays;
/*
* Framebuffer info.
@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t sequence,
bool interruptible,
unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bbp, unsigned depth);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8..8e39685 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = copy_from_user(cmd, user_cmd, arg->command_size);
if (unlikely(ret != 0)) {
+ ret = -EFAULT;
DRM_ERROR("Failed copying commands.\n");
goto out_commit;
}
@@ -669,6 +670,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out_err;
vmw_apply_relocations(sw_context);
+
+ if (arg->throttle_us) {
+ ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+ arg->throttle_us);
+
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+
vmw_fifo_commit(dev_priv, arg->command_size);
ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaa..b0866f04e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- /* without multimon its hard to resize */
- if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
- (var->xres != par->max_width ||
- var->yres != par->max_height)) {
- DRM_ERROR("Tried to resize, but we don't have multimon\n");
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+ (var->xoffset != 0 || var->yoffset != 0)) {
+ DRM_ERROR("Can not handle panning without display topology\n");
return -EINVAL;
}
- if (var->xres > par->max_width ||
- var->yres > par->max_height) {
+ if ((var->xoffset + var->xres) > par->max_width ||
+ (var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
- vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
- vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
+ vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+ if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
-
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- } else {
- vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
-
- /* TODO check if pitch and offset changes */
}
+ /* This check matters: if it fails, the user probably
+ * cannot see anything on the screen.
+ */
+ WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
return 0;
}
@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
+ /* XXX These shouldn't be hardcoded. */
initial_width = 800;
initial_height = 600;
fb_bbp = 32;
fb_depth = 24;
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
- fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- } else {
- fb_width = min(vmw_priv->fb_max_width, initial_width);
- fb_height = min(vmw_priv->fb_max_height, initial_height);
- }
+ /* XXX Nor should these be hardcoded. */
+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
- vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
- vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
- vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
- fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+ fb_pitch = fb_width * fb_bbp / 8;
+ fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
- fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
- DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
- DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
- DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
- DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
- DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
- DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
- DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
- DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
- DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
- DRM_DEBUG("fb_pitch %u\n", fb_pitch);
- DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
@@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
goto err_unlock;
ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+ /* We could probably BUG_ON() here instead. */
+ WARN_ON(bo->offset != 0);
+
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 0000000..61eacc1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
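+/* One entry per submitted fence sequence, kept in submission order. */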
+struct vmw_fence {
+ struct list_head head;
+ uint32_t sequence;
+ struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+ INIT_LIST_HEAD(&queue->head);
+ queue->lag = ns_to_timespec(0);
+ getrawmonotonic(&queue->lag_time);
+ spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+ struct vmw_fence *fence, *next;
+
+ spin_lock(&queue->lock);
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ kfree(fence);
+ }
+ spin_unlock(&queue->lock);
+}
+
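+/* Record a newly submitted fence sequence together with its submission time. */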
+int vmw_fence_push(struct vmw_fence_queue *queue,
+ uint32_t sequence)
+{
+ struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (unlikely(!fence))
+ return -ENOMEM;
+
+ fence->sequence = sequence;
+ getrawmonotonic(&fence->submitted);
+ spin_lock(&queue->lock);
+ list_add_tail(&fence->head, &queue->head);
+ spin_unlock(&queue->lock);
+
+ return 0;
+}
+
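+/*
+ * Retire all fences the device has already signaled and update the
+ * fifo lag estimate from the last one retired.
+ */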
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+ uint32_t signaled_sequence)
+{
+ struct vmw_fence *fence, *next;
+ struct timespec now;
+ bool updated = false;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+
+ if (list_empty(&queue->head)) {
+ queue->lag = ns_to_timespec(0);
+ queue->lag_time = now;
+ updated = true;
+ goto out_unlock;
+ }
+
+ list_for_each_entry_safe(fence, next, &queue->head, head) {
+ if (signaled_sequence - fence->sequence > (1 << 30))
+ continue;
+
+ queue->lag = timespec_sub(now, fence->submitted);
+ queue->lag_time = now;
+ updated = true;
+ list_del(&fence->head);
+ kfree(fence);
+ }
+
+out_unlock:
+ spin_unlock(&queue->lock);
+
+ return (updated) ? 0 : -EBUSY;
+}
+
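+/* Add two timespecs, normalizing the nanosecond field. */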
+static struct timespec vmw_timespec_add(struct timespec t1,
+ struct timespec t2)
+{
+ t1.tv_sec += t2.tv_sec;
+ t1.tv_nsec += t2.tv_nsec;
+ if (t1.tv_nsec >= 1000000000L) {
+ t1.tv_sec += 1;
+ t1.tv_nsec -= 1000000000L;
+ }
+
+ return t1;
+}
+
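+/* Grow the lag estimate by the time elapsed since it was last updated. */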
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+ struct timespec now;
+
+ spin_lock(&queue->lock);
+ getrawmonotonic(&now);
+ queue->lag = vmw_timespec_add(queue->lag,
+ timespec_sub(now, queue->lag_time));
+ queue->lag_time = now;
+ spin_unlock(&queue->lock);
+ return queue->lag;
+}
+
+
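+/* Return true if the current fifo lag does not exceed @us microseconds. */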
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+ uint32_t us)
+{
+ struct timespec lag, cond;
+
+ cond = ns_to_timespec((s64) us * 1000);
+ lag = vmw_fifo_lag(queue);
+ return (timespec_compare(&lag, &cond) < 1);
+}
+
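+/*
+ * Block until the fifo lag no longer exceeds @us microseconds, waiting on the
+ * oldest outstanding fence (or on the current sequence if the queue is empty).
+ */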
+int vmw_wait_lag(struct vmw_private *dev_priv,
+ struct vmw_fence_queue *queue, uint32_t us)
+{
+ struct vmw_fence *fence;
+ uint32_t sequence;
+ int ret;
+
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+ sequence = atomic_read(&dev_priv->fence_seq);
+ else {
+ fence = list_first_entry(&queue->head,
+ struct vmw_fence, head);
+ sequence = fence->sequence;
+ }
+ spin_unlock(&queue->lock);
+
+ ret = vmw_wait_fence(dev_priv, false, sequence, true,
+ 3*HZ);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ (void) vmw_fence_pull(queue, sequence);
+ }
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a0..e6a1eb7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+ return false;
+
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
return true;
}
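+/* Returns true if the device exposes pitchlock through the extended FIFO. */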
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t caps;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+ return false;
+
+ caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+ return true;
+
+ return false;
+}
+
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+ vmw_fence_queue_init(&fifo->fence_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state);
mutex_unlock(&dev_priv->hw_mutex);
+ vmw_fence_queue_takedown(&fifo->fence_queue);
if (likely(fifo->last_buffer != NULL)) {
vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fifo_state->last_buffer_add = true;
vmw_fifo_commit(dev_priv, bytes);
fifo_state->last_buffer_add = false;
+ (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+ vmw_update_sequence(dev_priv, fifo_state);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb53..e92298a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
return (busy == 0);
}
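+/* Re-read the last signaled sequence from the FIFO and retire newly signaled fences. */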
+void vmw_update_sequence(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo_state)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ if (dev_priv->last_read_sequence != sequence) {
+ dev_priv->last_read_sequence = sequence;
+ vmw_fence_pull(&fifo_state->fence_queue, sequence);
+ }
+}
bool vmw_fence_signaled(struct vmw_private *dev_priv,
uint32_t sequence)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
- dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ fifo_state = &dev_priv->fifo;
+ vmw_update_sequence(dev_priv, fifo_state);
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
- fifo_state = &dev_priv->fifo;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, sequence))
return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c..f1d6261 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
+ struct vmw_dma_buffer *buffer;
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
vfbs->base.base.depth = 24;
vfbs->base.base.width = width;
vfbs->base.base.height = height;
- vfbs->base.pin = NULL;
- vfbs->base.unpin = NULL;
+ vfbs->base.pin = &vmw_surface_dmabuf_pin;
+ vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
mutex_init(&vfbs->work_lock);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
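+/*
+ * Allocate a VRAM buffer large enough to back the surface framebuffer.
+ * Overlays are paused while the buffer is being placed.
+ */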
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+ unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+ int ret;
+
+ vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+ if (unlikely(vfbs->buffer == NULL))
+ return -ENOMEM;
+
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+ &vmw_vram_ne_placement,
+ false, &vmw_dmabuf_bo_free);
+ vmw_overlay_resume_all(dev_priv);
+
+ return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+ struct ttm_buffer_object *bo;
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(&vfb->base);
+
+ bo = &vfbs->buffer->base;
+ ttm_bo_unref(&bo);
+ vfbs->buffer = NULL;
+
+ return 0;
+}
+
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
+
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
- if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
- vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
- vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
- vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
- vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
- vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
- vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
- vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
- vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
- } else
- WARN_ON(true);
-
vmw_overlay_resume_all(dev_priv);
+ WARN_ON(ret != 0);
+
return 0;
}
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbd->base.base.bits_per_pixel = 32;
- vfbd->base.base.pitch = width * 32 / 4;
+ vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
vfbd->base.base.depth = 24;
vfbd->base.base.width = width;
vfbd->base.base.height = height;
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
- dev->mode_config.max_width = dev_priv->fb_max_width;
- dev->mode_config.max_height = dev_priv->fb_max_height;
+ /* Assumed largest framebuffer size. */
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
ret = vmw_kms_init_legacy_display_system(dev_priv);
@@ -826,49 +846,140 @@ out:
return ret;
}
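+/* Program the core SVGA mode registers, using pitchlock when available. */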
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bbp, unsigned depth)
+{
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
+
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
- /*
- * setup a single multimon monitor with the size
- * of 0x0, this stops the UI from resizing when we
- * change the framebuffer size
- */
- if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
+ struct vmw_vga_topology_state *save;
+ uint32_t i;
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
- vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_priv->vga_pitchlock =
+ vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
+ SVGA_FIFO_PITCHLOCK);
+
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+ return 0;
+ vmw_priv->num_displays = vmw_read(vmw_priv,
+ SVGA_REG_NUM_GUEST_DISPLAYS);
+
+ for (i = 0; i < vmw_priv->num_displays; ++i) {
+ save = &vmw_priv->vga_save[i];
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+ save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
+ save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
+ save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
+ save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
+ save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
+ struct vmw_vga_topology_state *save;
+ uint32_t i;
+
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
+ if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+ vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+ vmw_priv->vga_pitchlock);
+ else if (vmw_fifo_have_pitchlock(vmw_priv))
+ iowrite32(vmw_priv->vga_pitchlock,
+ vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+
+ if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+ return 0;
- /* TODO check for multimon */
- vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+ for (i = 0; i < vmw_priv->num_displays; ++i) {
+ save = &vmw_priv->vga_save[i];
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
return 0;
}
+
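+/*
+ * Ioctl: update the preferred guest display layout from a user-supplied
+ * list of rects; fall back to a single 800x600 output when none are given.
+ */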
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_update_layout_arg *arg =
+ (struct drm_vmw_update_layout_arg *)data;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ void __user *user_rects;
+ struct drm_vmw_rect *rects;
+ unsigned rects_size;
+ int ret;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!arg->num_outputs) {
+ struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
+ goto out_unlock;
+ }
+
+ rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+ rects = kzalloc(rects_size, GFP_KERNEL);
+ if (unlikely(!rects)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ user_rects = (void __user *)(unsigned long)arg->rects;
+ ret = copy_from_user(rects, user_rects, rects_size);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to get rects.\n");
+ goto out_free;
+ }
+
+ vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
+
+out_free:
+ kfree(rects);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8b95249..8a398a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
/*
- * Legacy display unit functions - vmwgfx_ldu.h
+ * Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9089159..cfaf690 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
struct list_head active;
unsigned num_active;
+ unsigned last_num_active;
struct vmw_framebuffer *fb;
};
@@ -48,9 +49,12 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
- struct list_head active;
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+ struct drm_display_mode *pref_mode;
- unsigned unit;
+ struct list_head active;
};
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
- struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_crtc *crtc = NULL;
int i = 0;
- /* to stop the screen from changing size on resize */
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
- for (i = 0; i < lds->num_active; i++) {
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ /* If there is no display topology the host just assumes
+ * that the guest will set the same layout as the host.
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+ int w = 0, h = 0;
+ list_for_each_entry(entry, &lds->active, active) {
+ crtc = &entry->base.crtc;
+ w = max(w, crtc->x + crtc->mode.hdisplay);
+ h = max(h, crtc->y + crtc->mode.vdisplay);
+ i++;
+ }
+
+ if (crtc == NULL)
+ return 0;
+ fb = entry->base.crtc.fb;
+
+ vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+
+ return 0;
}
- /* Now set the mode */
- vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+ if (!list_empty(&lds->active)) {
+ entry = list_entry(lds->active.next, typeof(*entry), active);
+ fb = entry->base.crtc.fb;
+
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+ fb->bits_per_pixel, fb->depth);
+ }
+
+ /* Make sure we always show something. */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+ lds->num_active ? lds->num_active : 1);
+
i = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
i++;
}
+ BUG_ON(i != lds->num_active);
+
+ lds->last_num_active = lds->num_active;
+
return 0;
}
@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
if (list_empty(&ldu->active))
return 0;
+ /* Must use list_del_init(), otherwise list_empty(&ldu->active) will not work. */
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *entry;
struct list_head *at;
+ BUG_ON(!ld->num_active && ld->fb);
+ if (vfb != ld->fb) {
+ if (ld->fb && ld->fb->unpin)
+ ld->fb->unpin(ld->fb);
+ if (vfb->pin)
+ vfb->pin(vfb);
+ ld->fb = vfb;
+ }
+
if (!list_empty(&ldu->active))
return 0;
at = &ld->active;
list_for_each_entry(entry, &ld->active, active) {
- if (entry->unit > ldu->unit)
+ if (entry->base.unit > ldu->base.unit)
break;
at = &entry->active;
}
list_add(&ldu->active, at);
- if (ld->num_active++ == 0) {
- BUG_ON(ld->fb);
- if (vfb->pin)
- vfb->pin(vfb);
- ld->fb = vfb;
- }
+
+ ld->num_active++;
return 0;
}
@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
/* ldu only supports one fb active at a time */
if (dev_priv->ldu_priv->fb && vfb &&
+ !(dev_priv->ldu_priv->num_active == 1 &&
+ !list_empty(&ldu->active)) &&
dev_priv->ldu_priv->fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
static enum drm_connector_status
vmw_ldu_connector_detect(struct drm_connector *connector)
{
- /* XXX vmwctrl should control connection status */
- if (vmw_connector_to_ldu(connector)->base.unit == 0)
+ if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
return connector_status_disconnected;
}
@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
- { DRM_MODE("800x600",
- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
- 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
+ struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
+ struct drm_display_mode prefmode = { DRM_MODE("preferred",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
int i;
+ /* Add preferred mode */
+ {
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = ldu->pref_width;
+ mode->vdisplay = ldu->pref_height;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_probed_add(connector, mode);
+
+ if (ldu->pref_mode) {
+ list_del_init(&ldu->pref_mode->head);
+ drm_mode_destroy(dev, ldu->pref_mode);
+ }
+
+ ldu->pref_mode = mode;
+ }
+
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
vmw_ldu_connector_builtin[i].vdisplay > max_height)
@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
if (!ldu)
return -ENOMEM;
- ldu->unit = unit;
+ ldu->base.unit = unit;
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
+ INIT_LIST_HEAD(&ldu->active);
+
+ ldu->pref_active = (unit == 0);
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_mode = NULL;
+
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- /* Initial status */
- if (unit == 0)
- connector->status = connector_status_connected;
- else
- connector->status = connector_status_disconnected;
+ connector->status = vmw_ldu_connector_detect(connector);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- INIT_LIST_HEAD(&ldu->active);
-
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_connector_attach_property(connector,
@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
dev_priv->ldu_priv->num_active = 0;
+ dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
drm_mode_create_dirty_info_property(dev_priv->dev);
vmw_ldu_init(dev_priv, 0);
- vmw_ldu_init(dev_priv, 1);
- vmw_ldu_init(dev_priv, 2);
- vmw_ldu_init(dev_priv, 3);
- vmw_ldu_init(dev_priv, 4);
- vmw_ldu_init(dev_priv, 5);
- vmw_ldu_init(dev_priv, 6);
- vmw_ldu_init(dev_priv, 7);
+ /* For old hardware without multimon, only enable one display. */
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_ldu_init(dev_priv, 1);
+ vmw_ldu_init(dev_priv, 2);
+ vmw_ldu_init(dev_priv, 3);
+ vmw_ldu_init(dev_priv, 4);
+ vmw_ldu_init(dev_priv, 5);
+ vmw_ldu_init(dev_priv, 6);
+ vmw_ldu_init(dev_priv, 7);
+ }
return 0;
}
@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
return 0;
}
+
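+/* Apply the new preferred size and active state to every legacy display unit. */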
+int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+ struct drm_vmw_rect *rects)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *con;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+ DRM_INFO("%s: new layout ", __func__);
+ for (i = 0; i < (int)num; i++)
+ DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+ rects[i].w, rects[i].h);
+ DRM_INFO("\n");
+#else
+ (void)i;
+#endif
+
+ list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+ ldu = vmw_connector_to_ldu(con);
+ if (num > ldu->base.unit) {
+ ldu->pref_width = rects[ldu->base.unit].w;
+ ldu->pref_height = rects[ldu->base.unit].h;
+ ldu->pref_active = true;
+ } else {
+ ldu->pref_width = 800;
+ ldu->pref_height = 600;
+ ldu->pref_active = false;
+ }
+ con->status = vmw_ldu_connector_detect(con);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ad566c8..df2036e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
if (stream->buf != buf)
stream->buf = vmw_dmabuf_reference(buf);
stream->saved = *arg;
+ /* stream is no longer stopped/paused */
+ stream->paused = false;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index f8fbbc6..8612378 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = copy_from_user(srf->sizes, user_sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
goto out_err1;
+ }
if (srf->scanout &&
srf->num_sizes == 1 &&
@@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
out_bad_resource:
out_no_reference:
ttm_base_object_unref(&base);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 441e38c..b87569e 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1,12 +1,32 @@
/*
- * vgaarb.c
+ * vgaarb.c: Implements the VGA arbitration. For details refer to
+ * Documentation/vgaarbiter.txt
+ *
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
*
- * Implements the VGA arbitration. For details refer to
- * Documentation/vgaarbiter.txt
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
*/
#include <linux/module.h>
@@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
- pr_devel("%s: %d\n", __func__, rsrc);
- pr_devel("%s: owns: %d\n", __func__, vgadev->owns);
+ pr_debug("%s: %d\n", __func__, rsrc);
+ pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
@@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
unsigned int old_locks = vgadev->locks;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
@@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
else
vga_decode_count--;
}
+ pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
@@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 5;
remaining -= 5;
- pr_devel("client 0x%p called 'lock'\n", priv);
+ pr_debug("client 0x%p called 'lock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 7;
remaining -= 7;
- pr_devel("client 0x%p called 'unlock'\n", priv);
+ pr_debug("client 0x%p called 'unlock'\n", priv);
if (strncmp(curr_pos, "all", 3) == 0)
io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
@@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 8;
remaining -= 8;
- pr_devel("client 0x%p called 'trylock'\n", priv);
+ pr_debug("client 0x%p called 'trylock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 7;
remaining -= 7;
- pr_devel("client 0x%p called 'target'\n", priv);
+ pr_debug("client 0x%p called 'target'\n", priv);
/* if target is default */
if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
@@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
ret_val = -EPROTO;
goto done;
}
- pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+ pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
pbus = pci_find_bus(domain, bus);
- pr_devel("vgaarb: pbus %p\n", pbus);
+ pr_debug("vgaarb: pbus %p\n", pbus);
if (pbus == NULL) {
pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
domain, bus);
@@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
goto done;
}
pdev = pci_get_slot(pbus, devfn);
- pr_devel("vgaarb: pdev %p\n", pdev);
+ pr_debug("vgaarb: pdev %p\n", pdev);
if (!pdev) {
pr_err("vgaarb: invalid PCI address %x:%x\n",
bus, devfn);
@@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
vgadev = vgadev_find(pdev);
- pr_devel("vgaarb: vgadev %p\n", vgadev);
+ pr_debug("vgaarb: vgadev %p\n", vgadev);
if (vgadev == NULL) {
pr_err("vgaarb: this pci device is not a vga device\n");
pci_dev_put(pdev);
@@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
- pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv);
+ pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
{
struct vga_arb_private *priv = file->private_data;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
@@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file)
struct vga_arb_private *priv;
unsigned long flags;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
if (priv == NULL)
@@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
unsigned long flags;
int i;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
@@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
- pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
@@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
- pr_devel("%s\n", __func__);
+ pr_debug("%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the