author    Dave Airlie <airlied@redhat.com>  2009-03-28 20:22:18 -0400
committer Dave Airlie <airlied@redhat.com>  2009-03-28 20:22:18 -0400
commit    90f959bcb386da2c71613dcefc6a285e054a539e (patch)
tree      ee3e9dd4111d4aad12e579cb0c2c159114dff263 /drivers/gpu
parent    41f13fe81dd1b08723ab9f3fc3c7f29cfa81f1a5 (diff)
parent    07d43ba98621f08e252a48c96b258b4d572b0257 (diff)
drm: merge Linux master into HEAD
Conflicts:
	drivers/gpu/drm/drm_info.c
	drivers/gpu/drm/drm_proc.c
	drivers/gpu/drm/i915/i915_gem_debugfs.c
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_fops.c              |    6
-rw-r--r--  drivers/gpu/drm/drm_info.c              |    4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c             |    4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |  116
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         |  894
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |   31
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |   22
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h       |   12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c        |   66
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    |  406
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c       |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c         |  148
14 files changed, 1375 insertions(+), 353 deletions(-)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f52663e..e13cb62 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -337,14 +337,10 @@ int drm_fasync(int fd, struct file *filp, int on)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
- int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
(long)old_encode_dev(priv->minor->device));
- retcode = fasync_helper(fd, filp, on, &dev->buf_async);
- if (retcode < 0)
- return retcode;
- return 0;
+ return fasync_helper(fd, filp, on, &dev->buf_async);
}
EXPORT_SYMBOL(drm_fasync);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 60a1b6c..f0f6c6b 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -286,9 +286,9 @@ int drm_vma_info(struct seq_file *m, void *data)
#endif
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
+ seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
atomic_read(&dev->vma_count),
- high_memory, virt_to_phys(high_memory));
+ high_memory, (u64)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
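
The drm_info.c hunk widens the format from %08lx to %08llx because virt_to_phys() returns a phys_addr_t, which is 64 bits even on 32-bit kernels built with PAE; casting the value to u64 lets one format string serve every configuration. A minimal sketch of the idiom (the helper name is illustrative, not from the patch):

#include <linux/types.h>
#include <linux/printk.h>

static void print_phys(phys_addr_t pa)
{
	/* phys_addr_t is 32 or 64 bits wide depending on config;
	 * widen explicitly so the %llx specifier is always correct. */
	printk(KERN_DEBUG "paddr = 0x%08llx\n", (u64)pa);
}
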
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index f7510a8..5de573a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -349,8 +349,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
DRM_DEBUG("adding \"%s\" to sysfs\n",
drm_get_connector_name(connector));
- snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
- dev->primary->index, drm_get_connector_name(connector));
+ dev_set_name(&connector->kdev, "card%d-%s",
+ dev->primary->index, drm_get_connector_name(connector));
ret = device_register(&connector->kdev);
if (ret) {
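
The drm_sysfs.c change tracks the removal of the fixed-size bus_id[] array from struct device: dev_set_name() formats and allocates the device name instead of snprintf'ing into a BUS_ID_SIZE buffer. The call pattern, sketched (the wrapper and its arguments are illustrative):

#include <linux/device.h>

static int register_card_dev(struct device *kdev, int card, const char *name)
{
	int ret;

	/* dev_set_name() allocates the formatted name; there is no
	 * fixed-size buffer to overflow or truncate into. */
	ret = dev_set_name(kdev, "card%d-%s", card, name);
	if (ret)
		return ret;

	return device_register(kdev);
}
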
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4d9f5c6..85549f6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->space >= n)
return 0;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
if (ring->head != last_head)
i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
return ret;
}
-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
for (i = 0; i < dwords;) {
int cmd, sz;
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
- return -EINVAL;
+ cmd = buffer[i];
if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
OUT_RING(cmd);
while (++i, --sz) {
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
- sizeof(cmd))) {
- return -EINVAL;
- }
- OUT_RING(cmd);
+ OUT_RING(buffer[i]);
}
}
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect __user *boxes,
+ struct drm_clip_rect *boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect box;
+ struct drm_clip_rect box = boxes[i];
RING_LOCALS;
- if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
- return -EFAULT;
- }
-
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
- drm_i915_cmdbuffer_t * cmd)
+ drm_i915_cmdbuffer_t *cmd,
+ struct drm_clip_rect *cliprects,
+ void *cmdbuf)
{
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cmd->cliprects, i,
+ ret = i915_emit_box(dev, cliprects, i,
cmd->DR1, cmd->DR4);
if (ret)
return ret;
}
- ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+ ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
if (ret)
return ret;
}
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
- drm_i915_batchbuffer_t * batch)
+ drm_i915_batchbuffer_t * batch,
+ struct drm_clip_rect *cliprects)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect __user *boxes = batch->cliprects;
int nbox = batch->num_cliprects;
int i = 0, count;
RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
+ int ret = i915_emit_box(dev, cliprects, i,
batch->DR1, batch->DR4);
if (ret)
return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
+ struct drm_clip_rect *cliprects = NULL;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
- batch->num_cliprects *
- sizeof(struct drm_clip_rect)))
- return -EFAULT;
+ if (batch->num_cliprects < 0)
+ return -EINVAL;
+
+ if (batch->num_cliprects) {
+ cliprects = drm_calloc(batch->num_cliprects,
+ sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+ if (cliprects == NULL)
+ return -ENOMEM;
+
+ ret = copy_from_user(cliprects, batch->cliprects,
+ batch->num_cliprects *
+ sizeof(struct drm_clip_rect));
+ if (ret != 0)
+ goto fail_free;
+ }
mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, batch);
+ ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
mutex_unlock(&dev->struct_mutex);
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+ drm_free(cliprects,
+ batch->num_cliprects * sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+
return ret;
}
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
+ struct drm_clip_rect *cliprects = NULL;
+ void *batch_data;
int ret;
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (cmdbuf->num_cliprects &&
- DRM_VERIFYAREA_READ(cmdbuf->cliprects,
- cmdbuf->num_cliprects *
- sizeof(struct drm_clip_rect))) {
- DRM_ERROR("Fault accessing cliprects\n");
- return -EFAULT;
+ if (cmdbuf->num_cliprects < 0)
+ return -EINVAL;
+
+ batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
+ if (batch_data == NULL)
+ return -ENOMEM;
+
+ ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+ if (ret != 0)
+ goto fail_batch_free;
+
+ if (cmdbuf->num_cliprects) {
+ cliprects = drm_calloc(cmdbuf->num_cliprects,
+ sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+ if (cliprects == NULL)
+ goto fail_batch_free;
+
+ ret = copy_from_user(cliprects, cmdbuf->cliprects,
+ cmdbuf->num_cliprects *
+ sizeof(struct drm_clip_rect));
+ if (ret != 0)
+ goto fail_clip_free;
}
mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+ ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- return ret;
+ goto fail_batch_free;
}
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
+
+fail_batch_free:
+ drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
+fail_clip_free:
+ drm_free(cliprects,
+ cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
+
+ return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
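
The theme of the i915_dma.c changes above is a lock-order fix: cliprects and command words are snapshotted with copy_from_user() into kernel buffers before struct_mutex is taken, so the dispatch functions never touch user memory (and so can never fault) while holding the lock. A reduced sketch of that pattern, with hypothetical names standing in for the ioctl specifics:

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>

static int dispatch_with_snapshot(struct mutex *lock,
				  const void __user *uptr, size_t count,
				  size_t elem_size,
				  int (*dispatch)(void *buf, size_t count))
{
	void *buf = NULL;
	int ret = 0;

	if (count) {
		buf = kcalloc(count, elem_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (copy_from_user(buf, uptr, count * elem_size)) {
			ret = -EFAULT;	/* fault taken before locking */
			goto out;
		}
	}

	mutex_lock(lock);		/* no user access from here on */
	ret = dispatch(buf, count);
	mutex_unlock(lock);
out:
	kfree(buf);
	return ret;
}
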
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1bc45a7..c1685d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
- struct page **page_list;
+ struct page **pages;
+ int pages_refcount;
/**
* Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect __user *boxes,
+ struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
/* i915_irq.c */
@@ -786,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E22 || \
IS_GM45(dev))
+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2)
+ (dev)->pci_device == 0x29D2 || \
+ (IS_IGD(dev)))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+ IS_IGD(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
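
The new pages/pages_refcount pair in struct drm_i915_gem_object converts the old one-shot page_list alloc/free into get/put semantics, so pread, pwrite and GTT binding can each hold a reference to the pinned page array at the same time. The shape of the pattern, reduced to a sketch (the struct and pin/release helpers are placeholders, not the driver's):

#include <linux/errno.h>
#include <linux/bug.h>

struct page;

struct gem_obj {
	struct page **pages;
	int pages_refcount;
};

extern struct page **pin_backing_pages(struct gem_obj *o);
extern void release_backing_pages(struct gem_obj *o);

static int obj_get_pages(struct gem_obj *o)
{
	if (o->pages_refcount++ != 0)
		return 0;			/* already populated */
	o->pages = pin_backing_pages(o);
	if (o->pages == NULL) {
		o->pages_refcount--;		/* undo on failure */
		return -ENOMEM;
	}
	return 0;
}

static void obj_put_pages(struct gem_obj *o)
{
	BUG_ON(o->pages_refcount == 0);
	if (--o->pages_refcount != 0)
		return;				/* still in use elsewhere */
	release_backing_pages(o);
	o->pages = NULL;
}
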
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8d5ec5f..e5d2bdf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_get_pages(struct drm_gem_object *obj);
+static void i915_gem_object_put_pages(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static inline int
+fast_shmem_read(struct page **pages,
+ loff_t page_base, int page_offset,
+ char __user *data,
+ int length)
+{
+ char __iomem *vaddr;
+ int ret;
+
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+ ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ return ret;
+}
+
+static inline int
+slow_shmem_copy(struct page *dst_page,
+ int dst_offset,
+ struct page *src_page,
+ int src_offset,
+ int length)
+{
+ char *dst_vaddr, *src_vaddr;
+
+ dst_vaddr = kmap_atomic(dst_page, KM_USER0);
+ if (dst_vaddr == NULL)
+ return -ENOMEM;
+
+ src_vaddr = kmap_atomic(src_page, KM_USER1);
+ if (src_vaddr == NULL) {
+ kunmap_atomic(dst_vaddr, KM_USER0);
+ return -ENOMEM;
+ }
+
+ memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+
+ kunmap_atomic(src_vaddr, KM_USER1);
+ kunmap_atomic(dst_vaddr, KM_USER0);
+
+ return 0;
+}
+
+/**
+ * This is the fast shmem pread path, which attempts to copy_to_user directly
+ * from the backing pages of the object to the user's address space. On a
+ * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
+ */
+static int
+i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ ssize_t remain;
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+ int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
+ offset = args->offset;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
+ */
+ page_base = (offset & ~(PAGE_SIZE-1));
+ page_offset = offset & (PAGE_SIZE-1);
+ page_length = remain;
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+ ret = fast_shmem_read(obj_priv->pages,
+ page_base, page_offset,
+ user_data, page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/**
+ * This is the fallback shmem pread path, which uses get_user_pages to pin
+ * the user's destination pages ahead of time, so we can copy out of the
+ * object's backing pages while holding the struct mutex without taking
+ * page faults.
+ */
+static int
+i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct mm_struct *mm = current->mm;
+ struct page **user_pages;
+ ssize_t remain;
+ loff_t offset, pinned_pages, i;
+ loff_t first_data_page, last_data_page, num_pages;
+ int shmem_page_index, shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int page_length;
+ int ret;
+ uint64_t data_ptr = args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, yet we want to hold it while
+ * dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+ user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (user_pages == NULL)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto fail_put_user_pages;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
+ offset = args->offset;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * shmem_page_index = page number within shmem file
+ * shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ shmem_page_index = offset / PAGE_SIZE;
+ shmem_page_offset = offset & ~PAGE_MASK;
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~PAGE_MASK;
+
+ page_length = remain;
+ if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ ret = slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ data_ptr += page_length;
+ offset += page_length;
+ }
+
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+ for (i = 0; i < pinned_pages; i++) {
+ SetPageDirty(user_pages[i]);
+ page_cache_release(user_pages[i]);
+ }
+ kfree(user_pages);
+
+ return ret;
+}
+
/**
* Reads data from the object referenced by handle.
*
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- ssize_t read;
- loff_t offset;
int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- offset = args->offset;
-
- read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
- args->size, &offset);
- if (read != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ if (ret != 0)
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
/* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
*/
static inline int
-slow_user_write(struct io_mapping *mapping,
- loff_t page_base, int page_offset,
- char __user *user_data,
- int length)
+slow_kernel_write(struct io_mapping *mapping,
+ loff_t gtt_base, int gtt_offset,
+ struct page *user_page, int user_offset,
+ int length)
{
- char __iomem *vaddr;
+ char *src_vaddr, *dst_vaddr;
unsigned long unwritten;
- vaddr = io_mapping_map_wc(mapping, page_base);
- if (vaddr == NULL)
- return -EFAULT;
- unwritten = __copy_from_user(vaddr + page_offset,
- user_data, length);
- io_mapping_unmap(vaddr);
+ dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
+ src_vaddr = kmap_atomic(user_page, KM_USER1);
+ unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
+ src_vaddr + user_offset,
+ length);
+ kunmap_atomic(src_vaddr, KM_USER1);
+ io_mapping_unmap_atomic(dst_vaddr);
if (unwritten)
return -EFAULT;
return 0;
}
+static inline int
+fast_shmem_write(struct page **pages,
+ loff_t page_base, int page_offset,
+ char __user *data,
+ int length)
+{
+ char __iomem *vaddr;
+
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+ __copy_from_user_inatomic(vaddr + page_offset, data, length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ return 0;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
obj_priv = obj->driver_private;
offset = obj_priv->gtt_offset + args->offset;
- obj_priv->dirty = 1;
while (remain > 0) {
/* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
page_offset, user_data, page_length);
/* If we get a fault while copying data, then (presumably) our
- * source page isn't available. In this case, use the
- * non-atomic function
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
*/
- if (ret) {
- ret = slow_user_write (dev_priv->mm.gtt_mapping,
- page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail;
- }
+ if (ret)
+ goto fail;
remain -= page_length;
user_data += page_length;
@@ -315,39 +527,284 @@ fail:
return ret;
}
+/**
+ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
+ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
+ */
static int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ ssize_t remain;
+ loff_t gtt_page_base, offset;
+ loff_t first_data_page, last_data_page, num_pages;
+ loff_t pinned_pages, i;
+ struct page **user_pages;
+ struct mm_struct *mm = current->mm;
+ int gtt_page_offset, data_page_offset, data_page_index, page_length;
int ret;
- loff_t offset;
- ssize_t written;
+ uint64_t data_ptr = args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, and all of the pwrite implementations
+ * want to hold it while dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+ user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (user_pages == NULL)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto out_unpin_pages;
+ }
mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret)
+ goto out_unlock;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ goto out_unpin_object;
+
+ obj_priv = obj->driver_private;
+ offset = obj_priv->gtt_offset + args->offset;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * gtt_page_base = page offset within aperture
+ * gtt_page_offset = offset within page in aperture
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ gtt_page_base = offset & PAGE_MASK;
+ gtt_page_offset = offset & ~PAGE_MASK;
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~PAGE_MASK;
+
+ page_length = remain;
+ if ((gtt_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - gtt_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
+ gtt_page_base, gtt_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
+
+ /* The source pages are pinned and kmapped, so this copy should
+ * not fault; if it fails anyway there is no slower path left
+ * to fall back to, so just return the error.
+ */
+ if (ret)
+ goto out_unpin_object;
+
+ remain -= page_length;
+ offset += page_length;
+ data_ptr += page_length;
+ }
+
+out_unpin_object:
+ i915_gem_object_unpin(obj);
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out_unpin_pages:
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+ kfree(user_pages);
+
+ return ret;
+}
+
+/**
+ * This is the fast shmem pwrite path, which attempts to directly
+ * copy_from_user into the kmapped pages backing the object.
+ */
+static int
+i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ ssize_t remain;
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+ int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
+ offset = args->offset;
+ obj_priv->dirty = 1;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
+ */
+ page_base = (offset & ~(PAGE_SIZE-1));
+ page_offset = offset & (PAGE_SIZE-1);
+ page_length = remain;
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+ ret = fast_shmem_write(obj_priv->pages,
+ page_base, page_offset,
+ user_data, page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/**
+ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
+ * the memory and maps it using kmap_atomic for copying.
+ *
+ * This avoids taking mmap_sem for faulting on the user's address while the
+ * struct_mutex is held.
+ */
+static int
+i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct mm_struct *mm = current->mm;
+ struct page **user_pages;
+ ssize_t remain;
+ loff_t offset, pinned_pages, i;
+ loff_t first_data_page, last_data_page, num_pages;
+ int shmem_page_index, shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int page_length;
+ int ret;
+ uint64_t data_ptr = args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, and all of the pwrite implementations
+ * want to hold it while dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+ user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (user_pages == NULL)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto fail_put_user_pages;
}
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret != 0)
+ goto fail_unlock;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret != 0)
+ goto fail_put_pages;
+
+ obj_priv = obj->driver_private;
offset = args->offset;
+ obj_priv->dirty = 1;
- written = vfs_write(obj->filp,
- (char __user *)(uintptr_t) args->data_ptr,
- args->size, &offset);
- if (written != args->size) {
- mutex_unlock(&dev->struct_mutex);
- if (written < 0)
- return written;
- else
- return -EINVAL;
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * shmem_page_index = page number within shmem file
+ * shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ shmem_page_index = offset / PAGE_SIZE;
+ shmem_page_offset = offset & ~PAGE_MASK;
+ data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~PAGE_MASK;
+
+ page_length = remain;
+ if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
+ if (ret)
+ goto fail_put_pages;
+
+ remain -= page_length;
+ data_ptr += page_length;
+ offset += page_length;
}
+fail_put_pages:
+ i915_gem_object_put_pages(obj);
+fail_unlock:
mutex_unlock(&dev->struct_mutex);
+fail_put_user_pages:
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+ kfree(user_pages);
- return 0;
+ return ret;
}
/**
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (obj_priv->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0)
- ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
- else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+ dev->gtt_total != 0) {
+ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+ if (ret == -EFAULT) {
+ ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+ file_priv);
+ }
+ } else {
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+ if (ret == -EFAULT) {
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+ file_priv);
+ }
+ }
#if WATCH_PWRITE
if (ret)
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
}
static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
+i915_gem_object_put_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count = obj->size / PAGE_SIZE;
int i;
- if (obj_priv->page_list == NULL)
- return;
+ BUG_ON(obj_priv->pages_refcount == 0);
+ if (--obj_priv->pages_refcount != 0)
+ return;
for (i = 0; i < page_count; i++)
- if (obj_priv->page_list[i] != NULL) {
+ if (obj_priv->pages[i] != NULL) {
if (obj_priv->dirty)
- set_page_dirty(obj_priv->page_list[i]);
- mark_page_accessed(obj_priv->page_list[i]);
- page_cache_release(obj_priv->page_list[i]);
+ set_page_dirty(obj_priv->pages[i]);
+ mark_page_accessed(obj_priv->pages[i]);
+ page_cache_release(obj_priv->pages[i]);
}
obj_priv->dirty = 0;
- drm_free(obj_priv->page_list,
+ drm_free(obj_priv->pages,
page_count * sizeof(struct page *),
DRM_MEM_DRIVER);
- obj_priv->page_list = NULL;
+ obj_priv->pages = NULL;
}
static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
- i915_gem_object_free_page_list(obj);
+ i915_gem_object_put_pages(obj);
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
}
static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
struct page *page;
int ret;
- if (obj_priv->page_list)
+ if (obj_priv->pages_refcount++ != 0)
return 0;
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->page_list != NULL);
- obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
- DRM_MEM_DRIVER);
- if (obj_priv->page_list == NULL) {
+ BUG_ON(obj_priv->pages != NULL);
+ obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ if (obj_priv->pages == NULL) {
DRM_ERROR("Faled to allocate page list\n");
+ obj_priv->pages_refcount--;
return -ENOMEM;
}
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
if (IS_ERR(page)) {
ret = PTR_ERR(page);
DRM_ERROR("read_mapping_page failed: %d\n", ret);
- i915_gem_object_free_page_list(obj);
+ i915_gem_object_put_pages(obj);
return ret;
}
- obj_priv->page_list[i] = page;
+ obj_priv->pages[i] = page;
}
return 0;
}
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %d at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
- ret = i915_gem_object_get_page_list(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
* into the GTT.
*/
obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->page_list,
+ obj_priv->pages,
page_count,
obj_priv->gtt_offset,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
- i915_gem_object_free_page_list(obj);
+ i915_gem_object_put_pages(obj);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->page_list == NULL)
+ if (obj_priv->pages == NULL)
return;
- drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+ drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
@@ -2155,7 +2623,7 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
if (obj_priv->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->page_list + i, 1);
+ drm_clflush_pages(obj_priv->pages + i, 1);
}
}
@@ -2220,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (obj_priv->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->page_list + i, 1);
+ drm_clflush_pages(obj_priv->pages + i, 1);
obj_priv->page_cpu_valid[i] = 1;
}
@@ -2241,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry)
+ struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_relocation_entry reloc;
- struct drm_i915_gem_relocation_entry __user *relocs;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
@@ -2258,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
entry->offset = obj_priv->gtt_offset;
- relocs = (struct drm_i915_gem_relocation_entry __user *)
- (uintptr_t) entry->relocs_ptr;
/* Apply the relocations, using the GTT aperture to avoid cache
* flushing requirements.
*/
for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_obj_priv;
uint32_t reloc_val, reloc_offset;
uint32_t __iomem *reloc_entry;
- ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
-
target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc.target_handle);
+ reloc->target_handle);
if (target_obj == NULL) {
i915_gem_object_unpin(obj);
return -EBADF;
@@ -2288,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
if (target_obj_priv->gtt_space == NULL) {
DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
+ reloc->target_handle);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.offset > obj->size - 4) {
+ if (reloc->offset > obj->size - 4) {
DRM_ERROR("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->size);
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.offset & 3) {
+ if (reloc->offset & 3) {
DRM_ERROR("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
+ obj, reloc->target_handle,
+ (int) reloc->offset);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
target_obj->pending_write_domain);
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
@@ -2347,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
"presumed %08x delta %08x\n",
__func__,
obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
(int) target_obj_priv->gtt_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
+ (int) reloc->presumed_offset,
+ reloc->delta);
#endif
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
drm_gem_object_unreference(target_obj);
continue;
}
@@ -2377,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
/* Map the page containing the relocation we're going to
* perform.
*/
- reloc_offset = obj_priv->gtt_offset + reloc.offset;
+ reloc_offset = obj_priv->gtt_offset + reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset &
~(PAGE_SIZE - 1)));
reloc_entry = (uint32_t __iomem *)(reloc_page +
(reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+ reloc_val = target_obj_priv->gtt_offset + reloc->delta;
#if WATCH_BUF
DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc.offset,
+ obj, (unsigned int) reloc->offset,
readl(reloc_entry), reloc_val);
#endif
writel(reloc_val, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
- /* Write the updated presumed offset for this entry back out
- * to the user.
+ /* The updated presumed offset for this entry will be
+ * copied back out to the user.
*/
- reloc.presumed_offset = target_obj_priv->gtt_offset;
- ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return ret;
- }
+ reloc->presumed_offset = target_obj_priv->gtt_offset;
drm_gem_object_unreference(target_obj);
}
@@ -2419,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
struct drm_i915_gem_execbuffer *exec,
+ struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
- (uintptr_t) exec->cliprects_ptr;
int nbox = exec->num_cliprects;
int i = 0, count;
uint32_t exec_start, exec_len;
@@ -2444,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
+ int ret = i915_emit_box(dev, cliprects, i,
exec->DR1, exec->DR4);
if (ret)
return ret;
@@ -2500,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
return ret;
}
+static int
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+ uint32_t buffer_count,
+ struct drm_i915_gem_relocation_entry **relocs)
+{
+ uint32_t reloc_count = 0, reloc_index = 0, i;
+ int ret;
+
+ *relocs = NULL;
+ for (i = 0; i < buffer_count; i++) {
+ if (reloc_count + exec_list[i].relocation_count < reloc_count)
+ return -EINVAL;
+ reloc_count += exec_list[i].relocation_count;
+ }
+
+ *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
+ if (*relocs == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < buffer_count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ ret = copy_from_user(&(*relocs)[reloc_index],
+ user_relocs,
+ exec_list[i].relocation_count *
+ sizeof(**relocs));
+ if (ret != 0) {
+ drm_free(*relocs, reloc_count * sizeof(**relocs),
+ DRM_MEM_DRIVER);
+ *relocs = NULL;
+ return ret;
+ }
+
+ reloc_index += exec_list[i].relocation_count;
+ }
+
+ return ret;
+}
+
+static int
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+ uint32_t buffer_count,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ uint32_t reloc_count = 0, i;
+ int ret = 0;
+
+ for (i = 0; i < buffer_count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ if (ret == 0) {
+ ret = copy_to_user(user_relocs,
+ &relocs[reloc_count],
+ exec_list[i].relocation_count *
+ sizeof(*relocs));
+ }
+
+ reloc_count += exec_list[i].relocation_count;
+ }
+
+ drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
+
+ return ret;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -2511,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
- int ret, i, pinned = 0;
+ struct drm_clip_rect *cliprects = NULL;
+ struct drm_i915_gem_relocation_entry *relocs;
+ int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
- uint32_t seqno, flush_domains;
+ uint32_t seqno, flush_domains, reloc_index;
int pin_tries;
#if WATCH_EXEC
@@ -2547,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
+ if (args->num_cliprects != 0) {
+ cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
+ DRM_MEM_DRIVER);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ ret = copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)
+ (uintptr_t) args->cliprects_ptr,
+ sizeof(*cliprects) * args->num_cliprects);
+ if (ret != 0) {
+ DRM_ERROR("copy %d cliprects failed: %d\n",
+ args->num_cliprects, ret);
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+ &relocs);
+ if (ret != 0)
+ goto pre_mutex_err;
+
mutex_lock(&dev->struct_mutex);
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2589,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Pin and relocate */
for (pin_tries = 0; ; pin_tries++) {
ret = 0;
+ reloc_index = 0;
+
for (i = 0; i < args->buffer_count; i++) {
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
ret = i915_gem_object_pin_and_relocate(object_list[i],
file_priv,
- &exec_list[i]);
+ &exec_list[i],
+ &relocs[reloc_index]);
if (ret)
break;
pinned = i + 1;
+ reloc_index += exec_list[i].relocation_count;
}
/* success */
if (ret == 0)
@@ -2683,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
#endif
/* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+ ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -2747,11 +3297,27 @@ err:
args->buffer_count, ret);
}
+ /* Copy the updated relocations out regardless of current error
+ * state. Failure to update the relocs would mean that the next
+ * time userland calls execbuf, it would do so with presumed offset
+ * state that didn't match the actual object state.
+ */
+ ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+ relocs);
+ if (ret2 != 0) {
+ DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+ if (ret == 0)
+ ret = ret2;
+ }
+
pre_mutex_err:
drm_free(object_list, sizeof(*object_list) * args->buffer_count,
DRM_MEM_DRIVER);
drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
DRM_MEM_DRIVER);
+ drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
+ DRM_MEM_DRIVER);
return ret;
}
@@ -3188,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
- dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+ dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3218,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
obj = dev_priv->hws_obj;
obj_priv = obj->driver_private;
- kunmap(obj_priv->page_list[0]);
+ kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
dev_priv->hws_obj = NULL;
@@ -3521,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
if (!obj_priv->phys_obj)
return;
- ret = i915_gem_object_get_page_list(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
goto out;
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+ char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(dst, KM_USER0);
}
- drm_clflush_pages(obj_priv->page_list, page_count);
+ drm_clflush_pages(obj_priv->pages, page_count);
drm_agp_chipset_flush(dev);
out:
obj_priv->phys_obj->cur_obj = NULL;
@@ -3577,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_page_list(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
DRM_ERROR("failed to get page list\n");
goto out;
@@ -3586,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+ char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
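
All of the rewritten i915_gem.c read/write paths share one dispatch scheme: try an optimistic fast path that only uses atomic, non-faulting user copies under struct_mutex, and fall back to a slow path that pins the user pages with get_user_pages() only when the fast path reports -EFAULT. In isolation the scheme looks like this (a sketch; the two extern helpers stand in for the _fast/_slow pairs above):

#include <linux/types.h>
#include <linux/errno.h>

extern int obj_pwrite_fast(void *obj, const void __user *src, size_t len);
extern int obj_pwrite_slow(void *obj, const void __user *src, size_t len);

static int obj_pwrite(void *obj, const void __user *src, size_t len)
{
	int ret;

	ret = obj_pwrite_fast(obj, src, len);	/* atomic copies only */
	if (ret == -EFAULT)			/* user page not resident */
		ret = obj_pwrite_slow(obj, src, len); /* pin, then copy */
	return ret;
}

The pinning in the slow path is what makes it safe to keep holding struct_mutex: once get_user_pages() holds the pages, the kmap_atomic copies cannot fault.
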
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 5a4cdb5..455ec97 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -192,7 +192,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
obj_priv = obj->driver_private;
seq_printf(m, "Fenced object[%2d] = %p: %s "
- "%08x %08x %08x %s %08x %08x %d",
+ "%08x %08zx %08x %s %08x %08x %d",
i, obj, get_pin_flag(obj_priv),
obj_priv->gtt_offset,
obj->size, obj_priv->stride,
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191..4cce1ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
- IS_GM45(dev)) {
+ } else if (IS_MOBILE(dev)) {
uint32_t dcc;
- /* On 915-945 and GM965, channel interleave by the CPU is
- * determined by DCC. The CPU will alternate based on bit 6
- * in interleaved mode, and the GPU will then also alternate
- * on bit 6, 9, and 10 for X, but the CPU may also optionally
- * alternate based on bit 17 (XOR not disabled and XOR
- * bit == 17).
+ /* On mobile 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (IS_I915G(dev) || IS_I915GM(dev) ||
- dcc & DCC_CHANNEL_XOR_DISABLE) {
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
- (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
- /* GM965/GM45 does either bit 11 or bit 17
- * swizzling.
- */
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
- /* Bit 17 or perhaps other swizzling */
+ /* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
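
To make the rewritten swizzle comment concrete: bit-6 swizzling means the effective bit 6 of a linear offset is XORed with selected higher address bits (9 and 10 for X-tiled on these parts, optionally 17 from the CPU side). An illustrative helper for the 9/10/17 case (my own sketch, not driver code):

#include <linux/types.h>

static u32 swizzle_addr_9_10_17(u32 addr)
{
	/* XOR bits 9, 10 and 17 of the offset into bit 6, which is
	 * how dual-channel interleave perturbs tiled CPU accesses. */
	u32 bit = ((addr >> 9) ^ (addr >> 10) ^ (addr >> 17)) & 1;

	return addr ^ (bit << 6);
}
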
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d8..377cc58 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -435,6 +436,7 @@
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
/* i830, required in DVO non-gang */
#define PLL_P2_DIVIDE_BY_4 (1 << 23)
#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -501,10 +503,12 @@
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_IGD_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
#define FP_M1_DIV_MASK 0x00003f00
#define FP_M1_DIV_SHIFT 8
#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_IGD_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -629,6 +633,22 @@
#define TV_HOTPLUG_INT_EN (1 << 18)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
+
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -856,7 +876,7 @@
*/
# define TV_ENC_C0_FIX (1 << 10)
/** Bits that must be preserved by software */
-# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
+# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
/** Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)
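
The IGD variants widen existing DPLL fields (the P1 post divider gains bit 15, N and M2 grow to eight bits), so register decode has to pick mask and shift per platform. The usual extract idiom with these macros (the decode helper itself is hypothetical):

#define DPLL_FPA01_P1_POST_DIV_MASK      0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_IGD  0x00ff8000 /* IGD */
#define DPLL_FPA01_P1_POST_DIV_SHIFT     16
#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15

static unsigned int dpll_p1_field(unsigned int dpll, int is_igd)
{
	/* Same register, platform-specific field boundaries. */
	if (is_igd)
		return (dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD)
			>> DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
	return (dpll & DPLL_FPA01_P1_POST_DIV_MASK)
		>> DPLL_FPA01_P1_POST_DIV_SHIFT;
}
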
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715a..de621aa 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
/* LVDS capabilities, stored in a dword */
- u8 rsvd2:1;
- u8 lvds_edid:1;
- u8 pixel_dither:1;
- u8 pfit_ratio_auto:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_text_mode_enhanced:1;
u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
u8 rsvd4;
} __attribute__((packed));
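
The bdb_lvds_options fix reverses the declaration order of the flag bits because GCC on little-endian x86 allocates bitfields starting at the least significant bit: for pfit_mode to occupy bits 0-1 of the byte, as the VBT lays it out, it must be declared first. A quick self-check of that allocation rule (a standalone test, not driver code):

#include <stdio.h>

struct lvds_flags {
	unsigned char pfit_mode:2;	/* declared first => bits 0-1 */
	unsigned char other:6;
};

int main(void)
{
	struct lvds_flags f = { .pfit_mode = 3, .other = 0 };

	/* Prints 0x03 with little-endian GCC/Clang: LSB-first layout. */
	printf("raw = 0x%02x\n", *(unsigned char *)&f);
	return 0;
}
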
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed34..2b6d443 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+
+ int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (mode->clock > 400000 || mode->clock < 25000)
- return MODE_CLOCK_RANGE;
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+ if (!IS_I9XX(dev))
+ max_clock = 350000;
+ else
+ max_clock = 400000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
return MODE_OK;
}
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- if (intel_crtc->pipe == 0)
+ if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- else
+ I915_WRITE(BCLRPAT_A, 0);
+ } else {
adpa |= ADPA_PIPE_B_SELECT;
+ I915_WRITE(BCLRPAT_B, 0);
+ }
I915_WRITE(ADPA, adpa);
}
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp;
-
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+ u32 hotplug_en;
+ int i, tries = 0;
+ /*
+ * On 4 series desktop, the CRT detect sequence needs to be done twice
+ * to get a reliable result.
+ */
- do {
- if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
- break;
- msleep(1);
- } while (time_after(timeout, jiffies));
+ if (IS_G4X(dev) && !IS_GM45(dev))
+ tries = 2;
+ else
+ tries = 1;
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~(CRT_HOTPLUG_MASK);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ if (IS_GM45(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ for (i = 0; i < tries; i++) {
+ unsigned long timeout;
+ /* turn on the FORCE_DETECT */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
+ /* wait for FORCE_DETECT to go off */
+ do {
+ if (!(I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+ }
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
CRT_HOTPLUG_MONITOR_COLOR)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a283427..d9c50ff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@ typedef struct {
} intel_p2_t;
#define INTEL_P2_NUM 2
-
-typedef struct {
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
-} intel_limit_t;
+ bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *);
+};
#define I8XX_DOT_MIN 25000
#define I8XX_DOT_MAX 350000
@@ -90,18 +92,32 @@ typedef struct {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
+#define IGD_VCO_MIN 1700000
+#define IGD_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
+/* IGD's N counter is a ring counter */
+#define IGD_N_MIN 3
+#define IGD_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
+#define IGD_M_MIN 2
+#define IGD_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
+/* IGD M1 is reserved, and must be 0 */
+#define IGD_M1_MIN 0
+#define IGD_M1_MAX 0
+#define IGD_M2_MIN 0
+#define IGD_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
+#define IGD_P_LVDS_MIN 7
+#define IGD_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -115,6 +131,97 @@ typedef struct {
#define INTEL_LIMIT_I8XX_LVDS 1
#define INTEL_LIMIT_I9XX_SDVO_DAC 2
#define INTEL_LIMIT_I9XX_LVDS 3
+#define INTEL_LIMIT_G4X_SDVO 4
+#define INTEL_LIMIT_G4X_HDMI_DAC 5
+#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
+#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
+#define INTEL_LIMIT_IGD_SDVO_DAC 8
+#define INTEL_LIMIT_IGD_LVDS 9
+
+/* These parameters are for SDVO on the G4X platform */
+#define G4X_DOT_SDVO_MIN 25000
+#define G4X_DOT_SDVO_MAX 270000
+#define G4X_VCO_MIN 1750000
+#define G4X_VCO_MAX 3500000
+#define G4X_N_SDVO_MIN 1
+#define G4X_N_SDVO_MAX 4
+#define G4X_M_SDVO_MIN 104
+#define G4X_M_SDVO_MAX 138
+#define G4X_M1_SDVO_MIN 17
+#define G4X_M1_SDVO_MAX 23
+#define G4X_M2_SDVO_MIN 5
+#define G4X_M2_SDVO_MAX 11
+#define G4X_P_SDVO_MIN 10
+#define G4X_P_SDVO_MAX 30
+#define G4X_P1_SDVO_MIN 1
+#define G4X_P1_SDVO_MAX 3
+#define G4X_P2_SDVO_SLOW 10
+#define G4X_P2_SDVO_FAST 10
+#define G4X_P2_SDVO_LIMIT 270000
+
+/* These parameters are for HDMI_DAC on the G4X platform */
+#define G4X_DOT_HDMI_DAC_MIN 22000
+#define G4X_DOT_HDMI_DAC_MAX 400000
+#define G4X_N_HDMI_DAC_MIN 1
+#define G4X_N_HDMI_DAC_MAX 4
+#define G4X_M_HDMI_DAC_MIN 104
+#define G4X_M_HDMI_DAC_MAX 138
+#define G4X_M1_HDMI_DAC_MIN 16
+#define G4X_M1_HDMI_DAC_MAX 23
+#define G4X_M2_HDMI_DAC_MIN 5
+#define G4X_M2_HDMI_DAC_MAX 11
+#define G4X_P_HDMI_DAC_MIN 5
+#define G4X_P_HDMI_DAC_MAX 80
+#define G4X_P1_HDMI_DAC_MIN 1
+#define G4X_P1_HDMI_DAC_MAX 8
+#define G4X_P2_HDMI_DAC_SLOW 10
+#define G4X_P2_HDMI_DAC_FAST 5
+#define G4X_P2_HDMI_DAC_LIMIT 165000
+
+/* These parameters are for SINGLE_CHANNEL_LVDS on the G4X platform */
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
+#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
+#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
+#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
+#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
+#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
+#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
+#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
+#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
+#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
+#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
+#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
+#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
+
+/* These parameters are for DUAL_CHANNEL_LVDS on the G4X platform */
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
+#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
+#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
+#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
+#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
+#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
+#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
+#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
+#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
+#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
+#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
+#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
+#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
+#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
+#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
+#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
+#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits[] = {
{ /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I8XX_LVDS */
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I9XX_SDVO_DAC */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I9XX_LVDS */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
*/
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
+ .find_pll = intel_find_best_PLL,
+ },
+ /* the parameters and functions below are for the G4X chipset family */
+ { /* INTEL_LIMIT_G4X_SDVO */
+ .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
+ .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
+ .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
+ .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
+ .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
+ .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
+ .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
+ .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
+ .p2_slow = G4X_P2_SDVO_SLOW,
+ .p2_fast = G4X_P2_SDVO_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_G4X_HDMI_DAC */
+ .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
+ .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
+ .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
+ .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
+ .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
+ .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
+ .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
+ .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
+ .p2_slow = G4X_P2_HDMI_DAC_SLOW,
+ .p2_fast = G4X_P2_HDMI_DAC_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+ .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX },
+ .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
+ .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
+ .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
+ .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
+ .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
+ .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
+ .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
+ .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
+ .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
+ .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+ .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX },
+ .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
+ .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
+ .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
+ .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
+ .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
+ .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
+ .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
+ .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
+ .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
+ .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+ },
+ .find_pll = intel_g4x_find_best_PLL,
+ },
+ { /* INTEL_LIMIT_IGD_SDVO_DAC */
+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ .find_pll = intel_find_best_PLL,
},
+ { /* INTEL_LIMIT_IGD_LVDS */
+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
+ /* IGD only supports single-channel mode. */
+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+ .find_pll = intel_find_best_PLL,
+ },
+
};
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ /* LVDS with dual channel */
+ limit = &intel_limits
+ [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+ else
+ /* LVDS with single channel */
+ limit = &intel_limits
+ [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+ } else /* use the I9XX SDVO/DAC limits for other output types */
+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+
+ return limit;
+}
+
static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_I9XX(dev)) {
+ if (IS_G4X(dev)) {
+ limit = intel_g4x_limit(crtc);
+ } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
else
limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ } else if (IS_IGD(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+ else
+ limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
return limit;
}
-static void intel_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in IGD, n is a ring counter */
+static void igd_clock(int refclk, intel_clock_t *clock)
{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+ if (IS_IGD(dev)) {
+ igd_clock(refclk, clock);
+ return;
+ }
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
}
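For the non-IGD path, intel_clock() implements dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2). A worked example (not kernel code), using divisor values from the I9XX ranges above and the 96 MHz refclk that intel_crtc_clock_get() assumes later in this patch:

#include <stdio.h>

int main(void)
{
	int refclk = 96000;			/* kHz */
	int m1 = 10, m2 = 5, n = 1, p1 = 1, p2 = 10;

	int m = 5 * (m1 + 2) + (m2 + 2);	/* 67 */
	int p = p1 * p2;			/* 10 */
	int vco = refclk * m / (n + 2);		/* 2144000 kHz, inside the I9XX VCO range */
	int dot = vco / p;			/* 214400 kHz, inside the I9XX dot range */

	printf("m=%d p=%d vco=%d kHz dot=%d kHz\n", m, p, vco, dot);
	return 0;
}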
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
{
const intel_limit_t *limit = intel_limit (crtc);
+ struct drm_device *dev = crtc->dev;
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
- if (clock->m1 <= clock->m2)
+ if (clock->m1 <= clock->m2 && !IS_IGD(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
return true;
}
-/**
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
- int refclk, intel_clock_t *best_clock)
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
- const intel_limit_t *limit = intel_limit(crtc);
int err = target;
if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
memset (best_clock, 0, sizeof (*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
- clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in IGD */
+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ break;
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
- intel_clock(refclk, &clock);
+ intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(crtc, &clock))
continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
return (err != target);
}
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ int max_n;
+ bool found;
+ /* approximately equals target * 0.00488 */
+ int err_most = (target >> 8) + (target >> 10);
+ found = false;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n for precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1, m2 and p1 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+ if (!intel_PLL_is_valid(crtc, &clock))
+ continue;
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
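The err_most starting bound in intel_g4x_find_best_PLL() is computed with shifts: (target >> 8) + (target >> 10) = target * (1/256 + 1/1024) = target * 5/1024, roughly target * 0.00488 as the comment says. A quick standalone check (not kernel code):

#include <stdio.h>

int main(void)
{
	int target = 270000;				/* kHz, e.g. the G4X SDVO dot limit */
	int err_most = (target >> 8) + (target >> 10);	/* 1054 + 263 = 1317 */

	printf("err_most = %d (~%.5f of target)\n",
	       err_most, (double)err_most / target);
	return 0;
}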
void
intel_wait_for_vblank(struct drm_device *dev)
{
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
return 400000;
else if (IS_I915G(dev))
return 333000;
- else if (IS_I945GM(dev) || IS_845G(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
return 200000;
else if (IS_I915GM(dev)) {
u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ const intel_limit_t *limit;
int ret;
drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
refclk = 48000;
}
- ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (IS_IGD(dev))
+ fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+ else
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
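On IGD the FP register's N field holds a one-hot value, so the code above programs (1 << clock.n), and the readback path later in this patch recovers n as ffs(field) - 1. A round-trip sketch (not kernel code, using the POSIX ffs() from <strings.h>):

#include <stdio.h>
#include <strings.h>	/* POSIX ffs() */

int main(void)
{
	for (int n = 3; n <= 6; n++) {			/* IGD_N_MIN .. IGD_N_MAX */
		unsigned field = 1u << n;		/* what mode set programs */
		int decoded = ffs(field) - 1;		/* what clock readback recovers */
		printf("n=%d -> field=0x%02x -> decoded n=%d\n", n, field, decoded);
	}
	return 0;
}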
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 1)) << 16;
+ if (IS_IGD(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+ else
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
switch (clock.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ if (IS_IGD(dev)) {
+ clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
if (IS_I9XX(dev)) {
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ if (IS_IGD(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
/* XXX: Handle the 100Mhz refclk */
- intel_clock(96000, &clock);
+ intel_clock(dev, 96000, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
- intel_clock(66000, &clock);
+ intel_clock(dev, 66000, &clock);
} else
- intel_clock(48000, &clock);
+ intel_clock(dev, 48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
else
clock.p2 = 2;
- intel_clock(48000, &clock);
+ intel_clock(dev, 48000, &clock);
}
}
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_I9XX(dev)) {
int found;
+ u32 reg;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
}
- if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
+
+ /* Before G4X, SDVOC doesn't have its own detect register */
+ if (IS_G4X(dev))
+ reg = SDVOC;
+ else
+ reg = SDVOB;
+
+ if (I915_READ(reg) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af..6619f26 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
pfit_control = 0;
if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither)
+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d6..ceca947 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
*/
static const struct color_conversion ntsc_m_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
- .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
- .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
};
static const struct color_conversion ntsc_m_csc_svideo = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
- .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
- .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
static const struct color_conversion ntsc_j_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
- .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
- .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
};
static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
static const struct color_conversion ntsc_j_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
- .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
- .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
};
static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
static const struct color_conversion pal_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
- .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
- .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
};
static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
static const struct color_conversion pal_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
- .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
- .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
};
static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
static const struct color_conversion pal_m_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
- .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
- .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
};
static const struct color_conversion pal_m_csc_svideo = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
- .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
- .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
static const struct color_conversion pal_n_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
- .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
- .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
};
static const struct color_conversion pal_n_csc_svideo = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
- .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
- .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
* Component connections
*/
static const struct color_conversion sdtv_csc_yprpb = {
- .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
- .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
- .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};
static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
};
static const struct color_conversion hdtv_csc_yprpb = {
- .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
- .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
- .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};
static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 107.52 */
- .dda1_inc = 136,
- .dda2_inc = 7624, .dda2_size = 20013,
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "NTSC-443",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 107.52 */
.dda1_inc = 168,
- .dda2_inc = 18557, .dda2_size = 20625,
- .dda3_inc = 0, .dda3_size = 0,
- .sc_reset = TV_SC_RESET_EVERY_8,
- .pal_burst = true,
+ .dda2_inc = 4093, .dda2_size = 27456,
+ .dda3_inc = 310, .dda3_size = 525,
+ .sc_reset = TV_SC_RESET_NEVER,
+ .pal_burst = false,
.composite_levels = &ntsc_m_levels_composite,
.composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "NTSC-J",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 107.52 */
- .dda1_inc = 136,
- .dda2_inc = 7624, .dda2_size = 20013,
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "PAL-M",
- .clock = 107520,
+ .clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 107.52 */
- .dda1_inc = 136,
- .dda2_inc = 7624, .dda2_size = 20013,
+ .dda1_inc = 135,
+ .dda2_inc = 16704, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
- .sc_reset = TV_SC_RESET_EVERY_4,
- .pal_burst = false,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
.composite_levels = &pal_m_levels_composite,
.composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
{
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL-N",
- .clock = 107520,
+ .clock = 108000,
.refresh = 25000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 107.52 */
- .dda1_inc = 168,
- .dda2_inc = 18557, .dda2_size = 20625,
- .dda3_inc = 0, .dda3_size = 0,
+ .dda1_inc = 135,
+ .dda2_inc = 23578, .dda2_size = 27648,
+ .dda3_inc = 134, .dda3_size = 625,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
{
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL",
- .clock = 107520,
+ .clock = 108000,
.refresh = 25000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
- .hsync_end = 64, .hblank_end = 128,
+ .hsync_end = 64, .hblank_end = 142,
.hblank_start = 844, .htotal = 863,
.progressive = false, .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 107.52 */
.dda1_inc = 168,
- .dda2_inc = 18557, .dda2_size = 20625,
- .dda3_inc = 0, .dda3_size = 0,
+ .dda2_inc = 4122, .dda2_size = 27648,
+ .dda3_inc = 67, .dda3_size = 625,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
.veq_ena = false,
.vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 496,
+ .nbr_end = 479,
.burst_ena = false,
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
.veq_ena = false,
.vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 496,
+ .nbr_end = 479,
.burst_ena = false,
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
.component_only = 1,
.hsync_end = 88, .hblank_end = 235,
- .hblank_start = 2155, .htotal = 2200,
+ .hblank_start = 2155, .htotal = 2201,
.progressive = false, .trilevel_sync = true,
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
return MODE_OK;
return MODE_CLOCK_RANGE;
}
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
- tv_ctl = 0;
+ tv_ctl = I915_READ(TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
switch (tv_priv->type) {
default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* dda1 implies valid video levels */
if (tv_mode->dda1_inc) {
scctl1 |= TV_SC_DDA1_EN;
- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
}
if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
scctl1 |= TV_SC_DDA3_EN;
scctl1 |= tv_mode->sc_reset;
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
color_conversion->av);
}
- I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+ if (IS_I965G(dev))
+ I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ else
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
if (video_levels)
I915_WRITE(TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
+ intel_wait_for_vblank(dev);
}
/*
* A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- if (encoder->crtc) {
+ if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_output);
} else {
crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
type = -1;
}
+ tv_priv->type = type;
+
if (type < 0)
return connector_status_disconnected;
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode_ptr;
struct intel_output *intel_output = to_intel_output(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
- int j;
+ int j, count = 0;
+ u64 tmp;
for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
&& !tv_mode->component_only))
continue;
- mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
- DRM_MEM_DRIVER);
+ mode_ptr = drm_mode_create(connector->dev);
+ if (!mode_ptr)
+ continue;
strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
mode_ptr->vtotal = vactive_s + 33;
- mode_ptr->clock = (int) (tv_mode->refresh *
- mode_ptr->vtotal *
- mode_ptr->htotal / 1000) / 1000;
+ tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp *= mode_ptr->htotal;
+ tmp = div_u64(tmp, 1000000);
+ mode_ptr->clock = (int) tmp;
mode_ptr->type = DRM_MODE_TYPE_DRIVER;
drm_mode_probed_add(connector, mode_ptr);
+ count++;
}
- return 0;
+ return count;
}
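The replaced expression multiplied refresh * vtotal * htotal in 32-bit arithmetic, which overflows for the HD entries in this table; the new code widens the intermediate to u64 and divides with div_u64(). A standalone sketch (not kernel code, assuming refresh is stored in millihertz, as table values like 29970 suggest):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t refresh = 29970;			/* 29.97 Hz in millihertz */
	uint32_t vtotal = 1125, htotal = 2201;		/* 1080i-class timings */

	uint64_t tmp = (uint64_t)refresh * vtotal;	/* widen before multiplying */
	tmp *= htotal;
	printf("full product = %llu (exceeds 2^32 = 4294967296)\n",
	       (unsigned long long)tmp);
	printf("mode clock   = %llu kHz\n",
	       (unsigned long long)(tmp / 1000000));
	return 0;
}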
static void