Diffstat (limited to 'drivers/gpu')
56 files changed, 787 insertions, 489 deletions
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 09e11a5..fd9d0af 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -206,7 +206,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, size_t size; int ret; - DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", + DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); @@ -220,7 +220,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, size = mode_cmd.pitches[0] * mode_cmd.height; obj = drm_gem_cma_create(dev, size); - if (!obj) + if (IS_ERR(obj)) return -ENOMEM; fbi = framebuffer_alloc(0, dev->dev); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index cdf8b1e..d4b20ce 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -205,8 +205,6 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data) struct drm_gem_object *obj = ptr; struct seq_file *m = data; - seq_printf(m, "name %d size %zd\n", obj->name, obj->size); - seq_printf(m, "%6d %8zd %7d %8d\n", obj->name, obj->size, atomic_read(&obj->handle_count), @@ -239,7 +237,7 @@ int drm_vma_info(struct seq_file *m, void *data) mutex_lock(&dev->struct_mutex); seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n", atomic_read(&dev->vma_count), - high_memory, (void *)virt_to_phys(high_memory)); + high_memory, (void *)(unsigned long)virt_to_phys(high_memory)); list_for_each_entry(pt, &dev->vmalist, head) { vma = pt->vma; diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index aaeb6f8..b8a282ea 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -64,7 +64,6 @@ int drm_get_platform_dev(struct platform_device *platdev, } if (drm_core_check_feature(dev, DRIVER_MODESET)) { - dev_set_drvdata(&platdev->dev, dev); ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); if (ret) goto err_g1; diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 38f3a6c..3edd981 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -303,10 +303,10 @@ static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo) ch7xxx_readb(dvo, CH7xxx_PM, &val); - if (val & CH7xxx_PM_FPD) - return false; - else + if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP)) return true; + else + return false; } static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index aac4e5e..6770ee6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -118,6 +118,13 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); MODULE_PARM_DESC(i915_enable_ppgtt, "Enable PPGTT (default: true)"); +unsigned int i915_preliminary_hw_support __read_mostly = 0; +module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); +MODULE_PARM_DESC(preliminary_hw_support, + "Enable preliminary hardware support. " + "Enable Haswell and ValleyView Support. 
" + "(default: false)"); + static struct drm_driver driver; extern int intel_agp_enabled; @@ -826,6 +833,12 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; + if (intel_info->is_haswell || intel_info->is_valleyview) + if(!i915_preliminary_hw_support) { + DRM_ERROR("Preliminary hardware support disabled\n"); + return -ENODEV; + } + /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. This causes * us confusion instead, especially on the systems where both diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4f2831a..f511fa2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1217,6 +1217,7 @@ extern int i915_enable_rc6 __read_mostly; extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_enable_ppgtt __read_mostly; +extern unsigned int i915_preliminary_hw_support __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); @@ -1341,9 +1342,14 @@ int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) { struct scatterlist *sg = obj->pages->sgl; - while (n >= SG_MAX_SINGLE_ALLOC) { + int nents = obj->pages->nents; + while (nents > SG_MAX_SINGLE_ALLOC) { + if (n < SG_MAX_SINGLE_ALLOC - 1) + break; + sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); n -= SG_MAX_SINGLE_ALLOC - 1; + nents -= SG_MAX_SINGLE_ALLOC - 1; } return sg_page(sg+n); } @@ -1427,7 +1433,7 @@ int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev); int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, - struct drm_i915_gem_request *request); + u32 *seqno); int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19dbdd7..107f09b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1407,8 +1407,10 @@ out: return VM_FAULT_NOPAGE; case -ENOMEM: return VM_FAULT_OOM; + case -ENOSPC: + return VM_FAULT_SIGBUS; default: - WARN_ON_ONCE(ret); + WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); return VM_FAULT_SIGBUS; } } @@ -1822,10 +1824,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) sg_set_page(sg, page, PAGE_SIZE, 0); } + obj->pages = st; + if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj); - obj->pages = st; return 0; err_pages: @@ -1955,11 +1958,12 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring) int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, - struct drm_i915_gem_request *request) + u32 *out_seqno) { drm_i915_private_t *dev_priv = ring->dev->dev_private; - uint32_t seqno; + struct drm_i915_gem_request *request; u32 request_ring_position; + u32 seqno; int was_empty; int ret; @@ -1974,11 +1978,9 @@ i915_add_request(struct intel_ring_buffer *ring, if (ret) return ret; - if (request == NULL) { - request = kmalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; - } + request = kmalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return 
-ENOMEM; seqno = i915_gem_next_request_seqno(ring); @@ -2030,6 +2032,8 @@ i915_add_request(struct intel_ring_buffer *ring, } } + if (out_seqno) + *out_seqno = seqno; return 0; } @@ -3959,6 +3963,9 @@ i915_gem_init_hw(struct drm_device *dev) if (!intel_enable_gtt()) return -EIO; + if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) + I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); + i915_gem_l3_remap(dev); i915_gem_init_swizzling(dev); @@ -4098,7 +4105,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 64c1be0..a4162dd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -521,7 +521,7 @@ */ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 0x02090 -#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5) +#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 893f301..f78061a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -219,20 +219,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, intel_encoder_to_crt(to_intel_encoder(encoder)); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = dev->dev_private; - int dpll_md_reg; - u32 adpa, dpll_md; - - dpll_md_reg = DPLL_MD(intel_crtc->pipe); - - /* - * Disable separate mode multiplier used when cloning SDVO to CRT - * XXX this needs to be adjusted when we really are cloning - */ - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - dpll_md = I915_READ(dpll_md_reg); - I915_WRITE(dpll_md_reg, - dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); - } + u32 adpa; adpa = ADPA_HOTPLUG_BITS; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2b6ce9b..461a637 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3253,6 +3253,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (HAS_PCH_CPT(dev)) intel_cpt_verify_modeset(dev, intel_crtc->pipe); + + /* + * There seems to be a race in PCH platform hw (at least on some + * outputs) where an enabled pipe still completes any pageflip right + * away (as if the pipe is off) instead of waiting for vblank. As soon + * as the first vblank happend, everything works as expected. Hence just + * wait for one vblank before returning to avoid strange things + * happening. 
+ */ + intel_wait_for_vblank(dev, intel_crtc->pipe); } static void ironlake_crtc_disable(struct drm_crtc *crtc) @@ -7882,6 +7892,34 @@ struct intel_quirk { void (*hook)(struct drm_device *dev); }; +/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ +struct intel_dmi_quirk { + void (*hook)(struct drm_device *dev); + const struct dmi_system_id (*dmi_id_list)[]; +}; + +static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) +{ + DRM_INFO("Backlight polarity reversed on %s\n", id->ident); + return 1; +} + +static const struct intel_dmi_quirk intel_dmi_quirks[] = { + { + .dmi_id_list = &(const struct dmi_system_id[]) { + { + .callback = intel_dmi_reverse_brightness, + .ident = "NCR Corporation", + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, ""), + }, + }, + { } /* terminating entry */ + }, + .hook = quirk_invert_brightness, + }, +}; + static struct intel_quirk intel_quirks[] = { /* HP Mini needs pipe A force quirk (LP: #322104) */ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, @@ -7892,8 +7930,7 @@ static struct intel_quirk intel_quirks[] = { /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, - /* 855 & before need to leave pipe A & dpll A up */ - { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, + /* 830/845 need to leave pipe A & dpll A up */ { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, @@ -7922,6 +7959,10 @@ static void intel_init_quirks(struct drm_device *dev) q->subsystem_device == PCI_ANY_ID)) q->hook(dev); } + for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { + if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) + intel_dmi_quirks[i].hook(dev); + } } /* Disable the VGA plane that we never use */ @@ -8049,29 +8090,42 @@ static void intel_enable_pipe_a(struct drm_device *dev) } +static bool +intel_check_plane_mapping(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + u32 reg, val; + + if (dev_priv->num_pipe == 1) + return true; + + reg = DSPCNTR(!crtc->plane); + val = I915_READ(reg); + + if ((val & DISPLAY_PLANE_ENABLE) && + (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) + return false; + + return true; +} + static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg, val; + u32 reg; /* Clear any frame start delays used for debugging left by the BIOS */ reg = PIPECONF(crtc->pipe); I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); /* We need to sanitize the plane -> pipe mapping first because this will - * disable the crtc (and hence change the state) if it is wrong. */ - if (!HAS_PCH_SPLIT(dev)) { + * disable the crtc (and hence change the state) if it is wrong. Note + * that gen4+ has a fixed plane -> pipe mapping. 
*/ + if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { struct intel_connector *connector; bool plane; - reg = DSPCNTR(crtc->plane); - val = I915_READ(reg); - - if ((val & DISPLAY_PLANE_ENABLE) == 0 && - (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) - goto ok; - DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", crtc->base.base.id); @@ -8095,7 +8149,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) WARN_ON(crtc->active); crtc->base.enabled = false; } -ok: if (dev_priv->quirks & QUIRK_PIPEA_FORCE && crtc->pipe == PIPE_A && !crtc->active) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d1e8ddb..368ed8e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1797,7 +1797,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; if (i == intel_dp->lane_count && voltage_tries == 5) { - if (++loop_tries == 5) { + ++loop_tries; + if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); break; } @@ -1807,11 +1808,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) } /* Check to see if we've tried the same voltage 5 times */ - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - voltage_tries = 0; - } else + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { ++voltage_tries; + if (voltage_tries == 5) { + DRM_DEBUG_KMS("too many voltage retries, give up\n"); + break; + } + } else + voltage_tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new intel_dp->train_set as requested by target */ intel_get_adjust_train(intel_dp, link_status); @@ -2369,8 +2374,9 @@ static void intel_dp_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct intel_dp *intel_dp = intel_attached_dp(connector); - if (intel_dpd_is_edp(dev)) + if (is_edp(intel_dp)) intel_panel_destroy_backlight(dev); drm_sysfs_connector_remove(connector); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e3166df..edba93b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -777,6 +777,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Supermicro X7SPA-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index ebff850..4956259 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -209,7 +209,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, } static int intel_overlay_do_wait_request(struct intel_overlay *overlay, - struct drm_i915_gem_request *request, void (*tail)(struct intel_overlay *)) { struct drm_device *dev = overlay->dev; @@ -218,12 +217,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - ret = i915_add_request(ring, NULL, request); - if (ret) { - kfree(request); - return ret; - } - overlay->last_flip_req = request->seqno; + ret = i915_add_request(ring, NULL, &overlay->last_flip_req); + if (ret) + return ret; + overlay->flip_tail = tail; ret = 
i915_wait_seqno(ring, overlay->last_flip_req); if (ret) @@ -240,7 +237,6 @@ static int intel_overlay_on(struct intel_overlay *overlay) struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - struct drm_i915_gem_request *request; int ret; BUG_ON(overlay->active); @@ -248,17 +244,9 @@ static int intel_overlay_on(struct intel_overlay *overlay) WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) { - ret = -ENOMEM; - goto out; - } - ret = intel_ring_begin(ring, 4); - if (ret) { - kfree(request); - goto out; - } + if (ret) + return ret; intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); @@ -266,9 +254,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); - ret = intel_overlay_do_wait_request(overlay, request, NULL); -out: - return ret; + return intel_overlay_do_wait_request(overlay, NULL); } /* overlay needs to be enabled in OCMD reg */ @@ -278,17 +264,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay, struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; int ret; BUG_ON(!overlay->active); - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; - if (load_polyphase_filter) flip_addr |= OFC_UPDATE; @@ -298,22 +279,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay, DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); ret = intel_ring_begin(ring, 2); - if (ret) { - kfree(request); + if (ret) return ret; - } + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); intel_ring_emit(ring, flip_addr); intel_ring_advance(ring); - ret = i915_add_request(ring, NULL, request); - if (ret) { - kfree(request); - return ret; - } - - overlay->last_flip_req = request->seqno; - return 0; + return i915_add_request(ring, NULL, &overlay->last_flip_req); } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) @@ -349,15 +322,10 @@ static int intel_overlay_off(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; u32 flip_addr = overlay->flip_addr; - struct drm_i915_gem_request *request; int ret; BUG_ON(!overlay->active); - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; - /* According to intel docs the overlay hw may hang (when switching * off) without loading the filter coeffs. 
It is however unclear whether * this applies to the disabling of the overlay or to the switching off @@ -365,10 +333,9 @@ static int intel_overlay_off(struct intel_overlay *overlay) flip_addr |= OFC_UPDATE; ret = intel_ring_begin(ring, 6); - if (ret) { - kfree(request); + if (ret) return ret; - } + /* wait for overlay to go idle */ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); intel_ring_emit(ring, flip_addr); @@ -379,8 +346,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); intel_ring_advance(ring); - return intel_overlay_do_wait_request(overlay, request, - intel_overlay_off_tail); + return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail); } /* recover from an interruption due to a signal @@ -425,24 +391,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) return 0; if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { - struct drm_i915_gem_request *request; - /* synchronous slowpath */ - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; - ret = intel_ring_begin(ring, 2); - if (ret) { - kfree(request); + if (ret) return ret; - } intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); - ret = intel_overlay_do_wait_request(overlay, request, + ret = intel_overlay_do_wait_request(overlay, intel_overlay_release_old_vid_tail); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b3b4b6c..72f41aa 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3442,8 +3442,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) GEN6_RCCUNIT_CLOCK_GATE_DISABLE); /* Bspec says we need to always set all mask bits. */ - I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) | - _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL); + I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) | + _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL); /* * According to the spec the following bits should be diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 0007a4d..c01d97d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -139,6 +139,11 @@ struct intel_sdvo { /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; + + /* + * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd + */ + uint8_t dtd_sdvo_flags; }; struct intel_sdvo_connector { @@ -984,6 +989,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, return false; intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags; return true; } @@ -1092,6 +1098,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, * adjusted_mode. */ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); + if (intel_sdvo->is_tv || intel_sdvo->is_lvds) + input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) DRM_INFO("Setting input timings on %s failed\n", SDVO_NAME(intel_sdvo)); @@ -2277,10 +2285,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, - * as opposed to native LVDS, where we upscale with the panel-fitter - * (and hence only the native LVDS resolution could be cloned). 
*/ - intel_sdvo->base.cloneable = true; + /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */ + intel_sdvo->base.cloneable = false; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c index 1f34549..70586fd 100644 --- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c +++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c @@ -39,6 +39,11 @@ nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj) nv_wo32(gpuobj, i, 0x00000000); } + if (gpuobj->node) { + nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap, + &gpuobj->node); + } + if (gpuobj->heap.block_size) nouveau_mm_fini(&gpuobj->heap); diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c index bfddf87..a6d3cd6 100644 --- a/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/drivers/gpu/drm/nouveau/core/core/mm.c @@ -218,13 +218,16 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; - node->offset = roundup(offset, mm->block_size); - node->length = rounddown(offset + length, mm->block_size) - node->offset; + + if (length) { + node->offset = roundup(offset, mm->block_size); + node->length = rounddown(offset + length, mm->block_size); + node->length -= node->offset; + } list_add_tail(&node->nl_entry, &mm->nodes); list_add_tail(&node->fl_entry, &mm->free); mm->heap_nodes++; - mm->heap_size += length; return 0; } @@ -236,7 +239,7 @@ nouveau_mm_fini(struct nouveau_mm *mm) int nodes = 0; list_for_each_entry(node, &mm->nodes, nl_entry) { - if (nodes++ == mm->heap_nodes) + if (WARN_ON(nodes++ == mm->heap_nodes)) return -EBUSY; } diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h index 9ee9bf4..975137b 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/mm.h +++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h @@ -19,7 +19,6 @@ struct nouveau_mm { u32 block_size; int heap_nodes; - u32 heap_size; }; int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index dcb5c2b..70ca7d5a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -72,7 +72,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios) } data = of_get_property(dn, "NVDA,BMP", &size); - if (data) { + if (data && size) { bios->size = size; bios->data = kmalloc(bios->size, GFP_KERNEL); if (bios->data) @@ -104,6 +104,9 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios) goto out; bios->size = nv_rd08(bios, 0x700002) * 512; + if (!bios->size) + goto out; + bios->data = kmalloc(bios->size, GFP_KERNEL); if (bios->data) { for (i = 0; i < bios->size; i++) @@ -155,6 +158,9 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) /* read entire bios image to system memory */ bios->size = nv_rd08(bios, 0x300002) * 512; + if (!bios->size) + goto out; + bios->data = kmalloc(bios->size, GFP_KERNEL); if (bios->data) { for (i = 0; i < bios->size; i++) @@ -186,14 +192,22 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios) { struct pci_dev *pdev = nv_device(bios)->pdev; int ret, cnt, i; - u8 data[3]; - if (!nouveau_acpi_rom_supported(pdev)) + if (!nouveau_acpi_rom_supported(pdev)) { + bios->data = NULL; return; + } bios->size = 
0; - if (nouveau_acpi_get_bios_chunk(data, 0, 3) == 3) - bios->size = data[2] * 512; + bios->data = kmalloc(4096, GFP_KERNEL); + if (bios->data) { + if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096) + bios->size = bios->data[2] * 512; + kfree(bios->data); + } + + if (!bios->size) + return; bios->data = kmalloc(bios->size, GFP_KERNEL); for (i = 0; bios->data && i < bios->size; i += cnt) { @@ -229,12 +243,14 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) static int nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) { - if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) { + if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 || + bios->data[1] != 0xAA) { nv_info(bios, "... signature not found\n"); return 0; } - if (nvbios_checksum(bios->data, bios->data[2] * 512)) { + if (nvbios_checksum(bios->data, + min_t(u32, bios->data[2] * 512, bios->size))) { nv_info(bios, "... checksum invalid\n"); /* if a ro image is somewhat bad, it's probably all rubbish */ return writeable ? 2 : 1; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index 9ed6e72..7d75038 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -43,7 +43,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) *ver = nv_ro08(bios, dcb); if (*ver >= 0x41) { - nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver); + nv_warn(bios, "DCB version 0x%02x unknown\n", *ver); return 0x0000; } else if (*ver >= 0x30) { diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c index 5e5f4cd..f835501 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c @@ -157,11 +157,10 @@ pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) while (map->reg) { if (map->reg == reg && *ver >= 0x20) { u16 addr = (data += hdr); + *type = map->type; while (cnt--) { - if (nv_ro32(bios, data) == map->reg) { - *type = map->type; + if (nv_ro32(bios, data) == map->reg) return data; - } data += *len; } return addr; @@ -200,11 +199,10 @@ pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) while (map->reg) { if (map->type == type && *ver >= 0x20) { u16 addr = (data += hdr); + *reg = map->reg; while (cnt--) { - if (nv_ro32(bios, data) == map->reg) { - *reg = map->reg; + if (nv_ro32(bios, data) == map->reg) return data; - } data += *len; } return addr; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 436e9ef..5f570806 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c @@ -219,13 +219,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, ((priv->base.ram.size & 0x000000ff) << 32); tags = nv_rd32(priv, 0x100320); - if (tags) { - ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); - if (ret) - return ret; + ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); + if (ret) + return ret; - nv_debug(priv, "%d compression tags\n", tags); - } + nv_debug(priv, "%d compression tags\n", tags); size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; switch (device->chipset) { @@ -237,6 +235,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; + priv->base.ram.type = NV_MEM_TYPE_STOLEN; break; 
default: ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, @@ -277,7 +276,6 @@ nv50_fb_dtor(struct nouveau_object *object) __free_page(priv->r100c08_page); } - nouveau_mm_fini(&priv->base.vram); nouveau_fb_destroy(&priv->base); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index 3d2c883..dbfc2abf 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c @@ -292,7 +292,7 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, case DCB_I2C_NVIO_BIT: port->drive = info.drive & 0x0f; if (device->card_type < NV_D0) { - if (info.drive >= ARRAY_SIZE(nv50_i2c_port)) + if (port->drive >= ARRAY_SIZE(nv50_i2c_port)) break; port->drive = nv50_i2c_port[port->drive]; port->sense = port->drive; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c index b292379..5231786 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c @@ -134,7 +134,7 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm) end = ptimer->read(ptimer); if (cycles == 5) { - tach = (u64)60000000000; + tach = (u64)60000000000ULL; do_div(tach, (end - start)); return tach; } else diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c index 0203e1e..49050d9 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c @@ -92,7 +92,8 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv04_vmmgr_priv *priv; int ret; - if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, data, size, pobject); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c index 0ac18d0..aa81314 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c @@ -163,7 +163,8 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv04_vmmgr_priv *priv; int ret; - if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, data, size, pobject); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 259e5f1..35ac57f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -456,6 +456,7 @@ static struct ttm_tt * nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct page *dummy_read) { +#if __OS_HAS_AGP struct nouveau_drm *drm = nouveau_bdev(bdev); struct drm_device *dev = drm->dev; @@ -463,6 +464,7 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, return ttm_agp_tt_create(bdev, dev->agp->bridge, size, page_flags, dummy_read); } +#endif return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read); } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 8f98e5a..86124b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -290,6 +290,7 @@ 
nouveau_display_create(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_disp *pdisp = nouveau_disp(drm->device); struct nouveau_display *disp; + u32 pclass = dev->pdev->class >> 8; int ret, gen; disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); @@ -360,23 +361,27 @@ nouveau_display_create(struct drm_device *dev) drm_kms_helper_poll_init(dev); drm_kms_helper_poll_disable(dev); - if (nv_device(drm->device)->card_type < NV_50) - ret = nv04_display_create(dev); - else - if (nv_device(drm->device)->card_type < NV_D0) - ret = nv50_display_create(dev); - else - ret = nvd0_display_create(dev); - if (ret) - goto disp_create_err; - - if (dev->mode_config.num_crtc) { - ret = drm_vblank_init(dev, dev->mode_config.num_crtc); + if (nouveau_modeset == 1 || + (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) { + if (nv_device(drm->device)->card_type < NV_50) + ret = nv04_display_create(dev); + else + if (nv_device(drm->device)->card_type < NV_D0) + ret = nv50_display_create(dev); + else + ret = nvd0_display_create(dev); if (ret) - goto vblank_err; + goto disp_create_err; + + if (dev->mode_config.num_crtc) { + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); + if (ret) + goto vblank_err; + } + + nouveau_backlight_init(dev); } - nouveau_backlight_init(dev); return 0; vblank_err: @@ -395,7 +400,8 @@ nouveau_display_destroy(struct drm_device *dev) nouveau_backlight_exit(dev); drm_vblank_cleanup(dev); - disp->dtor(dev); + if (disp->dtor) + disp->dtor(dev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); @@ -530,9 +536,11 @@ nouveau_page_flip_reserve(struct nouveau_bo *old_bo, if (ret) goto fail; - ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); - if (ret) - goto fail_unreserve; + if (likely(old_bo != new_bo)) { + ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); + if (ret) + goto fail_unreserve; + } return 0; @@ -551,8 +559,10 @@ nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, nouveau_bo_fence(new_bo, fence); ttm_bo_unreserve(&new_bo->bo); - nouveau_bo_fence(old_bo, fence); - ttm_bo_unreserve(&old_bo->bo); + if (likely(old_bo != new_bo)) { + nouveau_bo_fence(old_bo, fence); + ttm_bo_unreserve(&old_bo->bo); + } nouveau_bo_unpin(old_bo); } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index ccae8c26..0910125 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -63,8 +63,9 @@ MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration"); static int nouveau_noaccel = 0; module_param_named(noaccel, nouveau_noaccel, int, 0400); -MODULE_PARM_DESC(modeset, "enable driver"); -static int nouveau_modeset = -1; +MODULE_PARM_DESC(modeset, "enable driver (default: auto, " + "0 = disabled, 1 = enabled, 2 = headless)"); +int nouveau_modeset = -1; module_param_named(modeset, nouveau_modeset, int, 0400); static struct drm_driver driver; @@ -363,7 +364,8 @@ nouveau_drm_unload(struct drm_device *dev) nouveau_pm_fini(dev); - nouveau_display_fini(dev); + if (dev->mode_config.num_crtc) + nouveau_display_fini(dev); nouveau_display_destroy(dev); nouveau_irq_fini(dev); @@ -403,13 +405,15 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state) pm_state.event == PM_EVENT_PRETHAW) return 0; - NV_INFO(drm, "suspending fbcon...\n"); - nouveau_fbcon_set_suspend(dev, 1); + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "suspending fbcon...\n"); + nouveau_fbcon_set_suspend(dev, 1); - NV_INFO(drm, "suspending display...\n"); - ret = 
nouveau_display_suspend(dev); - if (ret) - return ret; + NV_INFO(drm, "suspending display...\n"); + ret = nouveau_display_suspend(dev); + if (ret) + return ret; + } NV_INFO(drm, "evicting buffers...\n"); ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); @@ -445,8 +449,10 @@ fail_client: nouveau_client_init(&cli->base); } - NV_INFO(drm, "resuming display...\n"); - nouveau_display_resume(dev); + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "resuming display...\n"); + nouveau_display_resume(dev); + } return ret; } @@ -486,8 +492,10 @@ nouveau_drm_resume(struct pci_dev *pdev) nouveau_irq_postinstall(dev); nouveau_pm_resume(dev); - NV_INFO(drm, "resuming display...\n"); - nouveau_display_resume(dev); + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "resuming display...\n"); + nouveau_display_resume(dev); + } return 0; } @@ -662,9 +670,7 @@ nouveau_drm_init(void) #ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force()) nouveau_modeset = 0; - else #endif - nouveau_modeset = 1; } if (!nouveau_modeset) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 8194712..a101699 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -141,4 +141,6 @@ int nouveau_drm_resume(struct pci_dev *); nv_info((cli), fmt, ##args); \ } while (0) +extern int nouveau_modeset; + #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 9ca8afd..1d8cb50 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -61,13 +61,15 @@ nouveau_irq_handler(DRM_IRQ_ARGS) nv_subdev(pmc)->intr(nv_subdev(pmc)); - if (device->card_type >= NV_D0) { - if (nv_rd32(device, 0x000100) & 0x04000000) - nvd0_display_intr(dev); - } else - if (device->card_type >= NV_50) { - if (nv_rd32(device, 0x000100) & 0x04000000) - nv50_display_intr(dev); + if (dev->mode_config.num_crtc) { + if (device->card_type >= NV_D0) { + if (nv_rd32(device, 0x000100) & 0x04000000) + nvd0_display_intr(dev); + } else + if (device->card_type >= NV_50) { + if (nv_rd32(device, 0x000100) & 0x04000000) + nv50_display_intr(dev); + } } return IRQ_HANDLED; diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 0bf64c9..5566172 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -52,7 +52,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_pm *pm = nouveau_pm(dev); - struct nouveau_therm *therm = nouveau_therm(drm); + struct nouveau_therm *therm = nouveau_therm(drm->device); int ret; /*XXX: not on all boards, we should control based on temperature @@ -64,7 +64,6 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, ret = therm->fan_set(therm, perflvl->fanspeed); if (ret && ret != -ENODEV) { NV_ERROR(drm, "fanspeed set failed: %d\n", ret); - return ret; } } @@ -706,8 +705,7 @@ nouveau_hwmon_init(struct drm_device *dev) struct device *hwmon_dev; int ret = 0; - if (!therm || !therm->temp_get || !therm->attr_get || - !therm->attr_set || therm->temp_get(therm) < 0) + if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set) return -ENODEV; hwmon_dev = hwmon_device_register(&dev->pdev->dev); diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index 347a3bd..64f7020 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -220,7 +220,7 @@ 
out: NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); if (blue == 0x18) { - NV_INFO(drm, "Load detected on head A\n"); + NV_DEBUG(drm, "Load detected on head A\n"); return connector_status_connected; } @@ -338,8 +338,8 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) if (nv17_dac_sample_load(encoder) & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { - NV_INFO(drm, "Load detected on output %c\n", - '@' + ffs(dcb->or)); + NV_DEBUG(drm, "Load detected on output %c\n", + '@' + ffs(dcb->or)); return connector_status_connected; } else { return connector_status_disconnected; @@ -413,9 +413,9 @@ static void nv04_dac_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), - nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) @@ -461,8 +461,8 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n", - mode, nv_encoder->dcb->index); + NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n", + mode, nv_encoder->dcb->index); nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); } diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index da55d76..184cdf8 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -476,9 +476,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), - nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) @@ -520,8 +520,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n", - mode, nv_encoder->dcb->index); + NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n", + mode, nv_encoder->dcb->index); if (was_powersaving && is_powersaving_dpms(mode)) return; @@ -565,8 +565,8 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n", - mode, nv_encoder->dcb->index); + NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n", + mode, nv_encoder->dcb->index); nv04_dfp_update_backlight(encoder, mode); nv04_dfp_update_fp_control(encoder, mode); diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index 099fbed..62e826a 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -75,8 +75,8 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; uint8_t crtc1A; - NV_INFO(drm, "Setting dpms mode %d 
on TV encoder (output %d)\n", - mode, nv_encoder->dcb->index); + NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n", + mode, nv_encoder->dcb->index); state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); @@ -167,9 +167,8 @@ static void nv04_tv_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, - '@' + ffs(nv_encoder->dcb->or)); + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } static void nv04_tv_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 96184d0..2e566e12 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1690,10 +1690,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) } /* all other cases */ pll_in_use = radeon_get_pll_use_mask(crtc); - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { @@ -1715,10 +1715,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) } /* all other cases */ pll_in_use = radeon_get_pll_use_mask(crtc); - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; if (!(pll_in_use & (1 << ATOM_PPLL1))) return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 49cbb37..ba498f8 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, struct radeon_backlight_privdata *pdata; struct radeon_encoder_atom_dig *dig; u8 backlight_level; + char bl_name[16]; if (!radeon_encoder->enc_priv) return; @@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, memset(&props, 0, sizeof(props)); props.max_brightness = RADEON_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; - bd = backlight_device_register("radeon_bl", &drm_connector->kdev, + snprintf(bl_name, sizeof(bl_name), + "radeon_bl%d", dev->primary->index); + bd = backlight_device_register(bl_name, &drm_connector->kdev, pdata, &radeon_atom_backlight_ops, &props); if (IS_ERR(bd)) { DRM_ERROR("Backlight registration failed\n"); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a1f49c5..14313ad 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3431,9 +3431,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev) if (!(mask & DRM_PCIE_SPEED_50)) return; + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + if (speed_cntl & LC_CURRENT_DATA_RATE) { + DRM_INFO("PCIE gen 2 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); - speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { diff --git 
a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 573ed1b..30271b64 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg) case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: return true; default: + DRM_ERROR("Invalid register 0x%x in CS\n", reg); return false; } } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 8bcb554..81e6a56 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) WREG32(0x15DC, 0); /* empty context1-7 */ + /* Assign the pt base to something valid for now; the pts used for + * the VMs are determined by the application and setup and assigned + * on the fly in the vm part of radeon_gart.c + */ for (i = 1; i < 8; i++) { WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), rdev->gart.table_addr >> 12); } @@ -1534,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, { struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - int i; - radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2)); - radeon_ring_write(ring, pe); - radeon_ring_write(ring, upper_32_bits(pe) & 0xff); - for (i = 0; i < count; ++i) { - uint64_t value = 0; - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - addr += incr; - - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - addr += incr; - } + while (count) { + unsigned ndw = 1 + count * 2; + if (ndw > 0x3FFF) + ndw = 0x3FFF; + + radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw)); + radeon_ring_write(ring, pe); + radeon_ring_write(ring, upper_32_bits(pe) & 0xff); + for (; ndw > 1; ndw -= 2, --count, pe += 8) { + uint64_t value = 0; + if (flags & RADEON_VM_PAGE_SYSTEM) { + value = radeon_vm_map_gart(rdev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + addr += incr; + + } else if (flags & RADEON_VM_PAGE_VALID) { + value = addr; + addr += incr; + } - value |= r600_flags; - radeon_ring_write(ring, value); - radeon_ring_write(ring, upper_32_bits(value)); + value |= r600_flags; + radeon_ring_write(ring, value); + radeon_ring_write(ring, upper_32_bits(value)); + } } } @@ -1572,12 +1581,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) if (vm == NULL) return; - radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0)); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0)); - radeon_ring_write(ring, vm->last_pfn); - radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0)); radeon_ring_write(ring, vm->pd_gpu_addr >> 12); @@ -1588,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) /* bits 0-7 are the VM contexts0-7 */ radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); radeon_ring_write(ring, 1 << vm->id); + + /* sync PFP to ME, otherwise we might get invalid PFP reads */ + radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + radeon_ring_write(ring, 0x0); } diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h 
index 2423d1b..cbef681 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -502,6 +502,7 @@ #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_WAIT_REG_MEM 0x3C #define PACKET3_MEM_WRITE 0x3D +#define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 70c800f..cda280d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) if (!(mask & DRM_PCIE_SPEED_50)) return; + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + if (speed_cntl & LC_CURRENT_DATA_RATE) { + DRM_INFO("PCIE gen 2 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); /* 55 nm r6xx asics */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b04c064..8c42d54 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -663,9 +663,14 @@ struct radeon_vm { struct list_head list; struct list_head va; unsigned id; - unsigned last_pfn; - u64 pd_gpu_addr; - struct radeon_sa_bo *sa_bo; + + /* contains the page directory */ + struct radeon_sa_bo *page_directory; + uint64_t pd_gpu_addr; + + /* array of page tables, one for each page directory entry */ + struct radeon_sa_bo **page_tables; + struct mutex mutex; /* last fence for cs using this vm */ struct radeon_fence *fence; @@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size */ int radeon_vm_manager_init(struct radeon_device *rdev); void radeon_vm_manager_fini(struct radeon_device *rdev); -int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); +void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); +void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm); struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, struct radeon_vm *vm, int ring); void radeon_vm_fence(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index b0a5688..196d28d 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle, size = *(u16 *) info->buffer.pointer; if (size < 12) { - DRM_INFO("ATIF buffer is too small: %lu\n", size); + DRM_INFO("ATIF buffer is too small: %zu\n", size); err = -EINVAL; goto out; } @@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev, radeon_set_backlight_level(rdev, enc, req.backlight_level); +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *dig = enc->enc_priv; backlight_force_update(dig->bl_dev, @@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev, backlight_force_update(dig->bl_dev, BACKLIGHT_UPDATE_HOTKEY); } +#endif } } /* TODO: check other events */ @@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle, size = *(u16 *) info->buffer.pointer; if (size < 8) { - DRM_INFO("ATCS buffer is too small: %lu\n", size); + DRM_INFO("ATCS buffer is too small: %zu\n", size); 
err = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 582e994..37f6a90 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, atpx_arg_elements[1].integer.value = 0; } - status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer); + status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); /* Fail only if calling the method fails and ATPX is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { @@ -148,7 +148,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) size = *(u16 *) info->buffer.pointer; if (size < 8) { - printk("ATPX buffer is too small: %lu\n", size); + printk("ATPX buffer is too small: %zu\n", size); err = -EINVAL; goto out; } @@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, } /** - * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles + * radeon_atpx_pci_probe_handle - look up the ATPX handle * * @pdev: pci device * - * Look up the ATPX and ATRM handles (all asics). + * Look up the ATPX handles (all asics). * Returns true if the handles are found, false if not. */ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index cb7b7c0..41672cc 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, } out: + radeon_vm_add_to_lru(rdev, vm); mutex_unlock(&vm->mutex); mutex_unlock(&rdev->vm_manager.lock); return r; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 64a4264..e2f5f88 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev) */ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) { + uint64_t limit = (uint64_t)radeon_vram_limit << 20; + mc->vram_start = base; if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); @@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) - mc->real_vram_size = radeon_vram_limit; + if (limit && limit < mc->real_vram_size) + mc->real_vram_size = limit; dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); @@ -835,6 +837,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) } /** + * radeon_check_pot_argument - check that argument is a power of two + * + * @arg: value to check + * + * Validates that a certain argument is a power of two (all asics). + * Returns true if argument is valid. 
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+	return (arg & (arg - 1)) == 0;
+}
+
+/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 static void radeon_check_arguments(struct radeon_device *rdev)
 {
 	/* vramlimit must be a power of two */
-	switch (radeon_vram_limit) {
-	case 0:
-	case 4:
-	case 8:
-	case 16:
-	case 32:
-	case 64:
-	case 128:
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-		break;
-	default:
+	if (!radeon_check_pot_argument(radeon_vram_limit)) {
 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
 				radeon_vram_limit);
 		radeon_vram_limit = 0;
-		break;
 	}
-	radeon_vram_limit = radeon_vram_limit << 20;
+
 	/* gtt size must be power of two and greater or equal to 32M */
-	switch (radeon_gart_size) {
-	case 4:
-	case 8:
-	case 16:
+	if (radeon_gart_size < 32) {
 		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
 				radeon_gart_size);
 		radeon_gart_size = 512;
-		break;
-	case 32:
-	case 64:
-	case 128:
-	case 256:
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-		break;
-	default:
+
+	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
 				radeon_gart_size);
 		radeon_gart_size = 512;
-		break;
 	}
-	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
 	/* AGP mode can only be -1, 1, 2, 4, 8 */
 	switch (radeon_agpmode) {
 	case -1:
@@ -1018,6 +1006,10 @@ int radeon_device_init(struct radeon_device *rdev,
 		return r;
 	/* initialize vm here */
 	mutex_init(&rdev->vm_manager.lock);
+	/* Adjust VM size here.
+	 * Currently set to 4GB ((1 << 20) 4k pages).
+	 * Max GPUVM size for cayman and SI is 40 bits.
+	 */
 	rdev->vm_manager.max_pfn = 1 << 20;
 	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0c06d1..4debd60 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
 	/* Allocate pages table */
-	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
-				   GFP_KERNEL);
+	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
 	if (rdev->gart.pages == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages, GFP_KERNEL);
+	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+					rdev->gart.num_cpu_pages);
 	if (rdev->gart.pages_addr == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
 	}
 	rdev->gart.ready = false;
-	kfree(rdev->gart.pages);
-	kfree(rdev->gart.pages_addr);
+	vfree(rdev->gart.pages);
+	vfree(rdev->gart.pages_addr);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
@@ -423,6 +422,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 
 /**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
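Two details of the radeon_check_arguments rewrite above are worth spelling out: x & (x - 1) clears the lowest set bit, so the expression is zero exactly for 0 and for powers of two (0 is deliberately accepted, since it means "no limit"), and the limits are now kept in megabytes and widened to 64 bits before the << 20 conversion to bytes, so a value such as 4096 MB can no longer overflow a 32-bit quantity. A standalone sketch of both points:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	/* Zero for 0 and for powers of two: x & (x - 1) clears the lowest set bit. */
	static bool is_pot_or_zero(int x)
	{
		return (x & (x - 1)) == 0;
	}

	int main(void)
	{
		int mb = 4096;				/* module parameter, in MB */
		uint64_t bytes = (uint64_t)mb << 20;	/* widen first, then shift */

		printf("%d -> pot? %d\n", mb, is_pot_or_zero(mb));
		printf("4096 MB = %llu bytes\n", (unsigned long long)bytes);
		/* without the cast, 4096 << 20 overflows a 32-bit int */
		return 0;
	}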
+ */ +static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) +{ + return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; +} + +/** * radeon_vm_directory_size - returns the size of the page directory in bytes * * @rdev: radeon_device pointer @@ -431,7 +442,7 @@ void radeon_gart_fini(struct radeon_device *rdev) */ static unsigned radeon_vm_directory_size(struct radeon_device *rdev) { - return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8; + return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); } /** @@ -451,11 +462,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev) if (!rdev->vm_manager.enabled) { /* allocate enough for 2 full VM pts */ - size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); - size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8); + size = radeon_vm_directory_size(rdev); + size += rdev->vm_manager.max_pfn * 8; size *= 2; r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, - size, + RADEON_GPU_PAGE_ALIGN(size), RADEON_GEM_DOMAIN_VRAM); if (r) { dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", @@ -476,7 +487,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) /* restore page table */ list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { - if (vm->sa_bo == NULL) + if (vm->page_directory == NULL) continue; list_for_each_entry(bo_va, &vm->va, vm_list) { @@ -500,16 +511,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev, struct radeon_vm *vm) { struct radeon_bo_va *bo_va; + int i; - if (!vm->sa_bo) + if (!vm->page_directory) return; list_del_init(&vm->list); - radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence); + radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); list_for_each_entry(bo_va, &vm->va, vm_list) { bo_va->valid = false; } + + if (vm->page_tables == NULL) + return; + + for (i = 0; i < radeon_vm_num_pdes(rdev); i++) + radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence); + + kfree(vm->page_tables); } /** @@ -546,63 +566,106 @@ void radeon_vm_manager_fini(struct radeon_device *rdev) } /** + * radeon_vm_evict - evict page table to make room for new one + * + * @rdev: radeon_device pointer + * @vm: VM we want to allocate something for + * + * Evict a VM from the lru, making sure that it isn't @vm. (cayman+). + * Returns 0 for success, -ENOMEM for failure. + * + * Global and local mutex must be locked! + */ +static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm) +{ + struct radeon_vm *vm_evict; + + if (list_empty(&rdev->vm_manager.lru_vm)) + return -ENOMEM; + + vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, + struct radeon_vm, list); + if (vm_evict == vm) + return -ENOMEM; + + mutex_lock(&vm_evict->mutex); + radeon_vm_free_pt(rdev, vm_evict); + mutex_unlock(&vm_evict->mutex); + return 0; +} + +/** * radeon_vm_alloc_pt - allocates a page table for a VM * * @rdev: radeon_device pointer * @vm: vm to bind * * Allocate a page table for the requested vm (cayman+). - * Also starts to populate the page table. * Returns 0 for success, error for failure. * * Global and local mutex must be locked! 
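With the defaults in this series (max_pfn = 1 << 20, i.e. 4 GB of 4 KiB pages, and an assumed RADEON_VM_BLOCK_SIZE of 9), the helpers above work out to 2048 page directory entries of 8 bytes each, a 16 KiB directory, and 4 KiB per page table, which matches the "directory + max_pfn * 8, times two" worst case reserved by radeon_vm_manager_init. A quick standalone check of that arithmetic; the constants are assumptions taken from context:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_PFN       (1u << 20)	/* 4 GB of 4 KiB pages (assumed) */
	#define VM_BLOCK_SIZE 9			/* log2 of PTEs per page table (assumed) */

	int main(void)
	{
		unsigned num_pdes = MAX_PFN >> VM_BLOCK_SIZE;	/* 2048 */
		unsigned dir_bytes = num_pdes * 8;		/* 16 KiB */
		unsigned pt_bytes = (1u << VM_BLOCK_SIZE) * 8;	/* 4 KiB per table */

		printf("pdes=%u, directory=%u bytes, %u bytes per page table\n",
		       num_pdes, dir_bytes, pt_bytes);
		printf("worst case, all tables allocated: %u KiB\n",
		       (dir_bytes + num_pdes * pt_bytes) >> 10);
		return 0;
	}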
*/ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) { - struct radeon_vm *vm_evict; - int r; + unsigned pd_size, pts_size; u64 *pd_addr; - int tables_size; + int r; if (vm == NULL) { return -EINVAL; } - /* allocate enough to cover the current VM size */ - tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); - tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8); - - if (vm->sa_bo != NULL) { - /* update lru */ - list_del_init(&vm->list); - list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); + if (vm->page_directory != NULL) { return 0; } retry: - r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo, - tables_size, RADEON_GPU_PAGE_SIZE, false); + pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); + r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, + &vm->page_directory, pd_size, + RADEON_GPU_PAGE_SIZE, false); if (r == -ENOMEM) { - if (list_empty(&rdev->vm_manager.lru_vm)) { + r = radeon_vm_evict(rdev, vm); + if (r) return r; - } - vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); - mutex_lock(&vm_evict->mutex); - radeon_vm_free_pt(rdev, vm_evict); - mutex_unlock(&vm_evict->mutex); goto retry; } else if (r) { return r; } - pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo); - vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo); - memset(pd_addr, 0, tables_size); + vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory); + + /* Initially clear the page directory */ + pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory); + memset(pd_addr, 0, pd_size); + + pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *); + vm->page_tables = kzalloc(pts_size, GFP_KERNEL); + + if (vm->page_tables == NULL) { + DRM_ERROR("Cannot allocate memory for page table array\n"); + radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); + return -ENOMEM; + } + + return 0; +} +/** + * radeon_vm_add_to_lru - add VMs page table to LRU list + * + * @rdev: radeon_device pointer + * @vm: vm to add to LRU + * + * Add the allocated page table to the LRU list (cayman+). + * + * Global mutex must be locked! + */ +void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm) +{ + list_del_init(&vm->list); list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); - return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, - &rdev->ring_tmp_bo.bo->tbo.mem); } /** @@ -793,20 +856,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, } mutex_lock(&vm->mutex); - if (last_pfn > vm->last_pfn) { - /* release mutex and lock in right order */ - mutex_unlock(&vm->mutex); - mutex_lock(&rdev->vm_manager.lock); - mutex_lock(&vm->mutex); - /* and check again */ - if (last_pfn > vm->last_pfn) { - /* grow va space 32M by 32M */ - unsigned align = ((32 << 20) >> 12) - 1; - radeon_vm_free_pt(rdev, vm); - vm->last_pfn = (last_pfn + align) & ~align; - } - mutex_unlock(&rdev->vm_manager.lock); - } head = &vm->va; last_offset = 0; list_for_each_entry(tmp, &vm->va, vm_list) { @@ -865,6 +914,154 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) } /** + * radeon_vm_update_pdes - make sure that page directory is valid + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * + * Allocates new page tables if necessary + * and updates the page directory (cayman+). + * Returns 0 for success, error for failure. + * + * Global and local mutex must be locked! 
+ */ +static int radeon_vm_update_pdes(struct radeon_device *rdev, + struct radeon_vm *vm, + uint64_t start, uint64_t end) +{ + static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; + + uint64_t last_pde = ~0, last_pt = ~0; + unsigned count = 0; + uint64_t pt_idx; + int r; + + start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; + end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; + + /* walk over the address space and update the page directory */ + for (pt_idx = start; pt_idx <= end; ++pt_idx) { + uint64_t pde, pt; + + if (vm->page_tables[pt_idx]) + continue; + +retry: + r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, + &vm->page_tables[pt_idx], + RADEON_VM_PTE_COUNT * 8, + RADEON_GPU_PAGE_SIZE, false); + + if (r == -ENOMEM) { + r = radeon_vm_evict(rdev, vm); + if (r) + return r; + goto retry; + } else if (r) { + return r; + } + + pde = vm->pd_gpu_addr + pt_idx * 8; + + pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); + + if (((last_pde + 8 * count) != pde) || + ((last_pt + incr * count) != pt)) { + + if (count) { + radeon_asic_vm_set_page(rdev, last_pde, + last_pt, count, incr, + RADEON_VM_PAGE_VALID); + } + + count = 1; + last_pde = pde; + last_pt = pt; + } else { + ++count; + } + } + + if (count) { + radeon_asic_vm_set_page(rdev, last_pde, last_pt, count, + incr, RADEON_VM_PAGE_VALID); + + } + + return 0; +} + +/** + * radeon_vm_update_ptes - make sure that page tables are valid + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * @dst: destination address to map to + * @flags: mapping flags + * + * Update the page tables in the range @start - @end (cayman+). + * + * Global and local mutex must be locked! + */ +static void radeon_vm_update_ptes(struct radeon_device *rdev, + struct radeon_vm *vm, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) +{ + static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; + + uint64_t last_pte = ~0, last_dst = ~0; + unsigned count = 0; + uint64_t addr; + + start = start / RADEON_GPU_PAGE_SIZE; + end = end / RADEON_GPU_PAGE_SIZE; + + /* walk over the address space and update the page tables */ + for (addr = start; addr < end; ) { + uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; + unsigned nptes; + uint64_t pte; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = RADEON_VM_PTE_COUNT - (addr & mask); + + pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); + pte += (addr & mask) * 8; + + if ((last_pte + 8 * count) != pte) { + + if (count) { + radeon_asic_vm_set_page(rdev, last_pte, + last_dst, count, + RADEON_GPU_PAGE_SIZE, + flags); + } + + count = nptes; + last_pte = pte; + last_dst = dst; + } else { + count += nptes; + } + + addr += nptes; + dst += nptes * RADEON_GPU_PAGE_SIZE; + } + + if (count) { + radeon_asic_vm_set_page(rdev, last_pte, last_dst, count, + RADEON_GPU_PAGE_SIZE, flags); + } +} + +/** * radeon_vm_bo_update_pte - map a bo into the vm page table * * @rdev: radeon_device pointer @@ -887,12 +1084,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_semaphore *sem = NULL; struct radeon_bo_va *bo_va; unsigned nptes, npdes, ndw; - uint64_t pe, addr; - uint64_t pfn; + uint64_t addr; int r; /* nothing to do if vm isn't bound */ - if (vm->sa_bo == NULL) + if (vm->page_directory == NULL) return 0; bo_va = radeon_vm_bo_find(vm, bo); @@ -939,25 +1135,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, } } - /* estimate number of dw needed */ - /* reserve space 
for 32-bit padding */ - ndw = 32; - nptes = radeon_bo_ngpu_pages(bo); - pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE); + /* assume two extra pdes in case the mapping overlaps the borders */ + npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; - /* handle cases where a bo spans several pdes */ - npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) - - (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE; + /* estimate number of dw needed */ + /* semaphore, fence and padding */ + ndw = 32; + + if (RADEON_VM_BLOCK_SIZE > 11) + /* reserve space for one header for every 2k dwords */ + ndw += (nptes >> 11) * 4; + else + /* reserve space for one header for + every (1 << BLOCK_SIZE) entries */ + ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; - /* reserve space for one header for every 2k dwords */ - ndw += (nptes >> 11) * 3; /* reserve space for pte addresses */ ndw += nptes * 2; /* reserve space for one header for every 2k dwords */ - ndw += (npdes >> 11) * 3; + ndw += (npdes >> 11) * 4; + /* reserve space for pde addresses */ ndw += npdes * 2; @@ -971,22 +1171,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, radeon_fence_note_sync(vm->fence, ridx); } - /* update page table entries */ - pe = vm->pd_gpu_addr; - pe += radeon_vm_directory_size(rdev); - pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8; - - radeon_asic_vm_set_page(rdev, pe, addr, nptes, - RADEON_GPU_PAGE_SIZE, bo_va->flags); - - /* update page directory entries */ - addr = pe; - - pe = vm->pd_gpu_addr; - pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8; + r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } - radeon_asic_vm_set_page(rdev, pe, addr, npdes, - RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID); + radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset, + addr, bo_va->flags); radeon_fence_unref(&vm->fence); r = radeon_fence_emit(rdev, &vm->fence, ridx); @@ -997,6 +1189,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, radeon_ring_unlock_commit(rdev, ring); radeon_semaphore_free(rdev, &sem, vm->fence); radeon_fence_unref(&vm->last_flush); + return 0; } @@ -1056,31 +1249,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, * @rdev: radeon_device pointer * @vm: requested vm * - * Init @vm (cayman+). - * Map the IB pool and any other shared objects into the VM - * by default as it's used by all VMs. - * Returns 0 for success, error for failure. + * Init @vm fields (cayman+). 
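The command-size estimate above reserves ring space before any page table update is emitted: a fixed 32-dword allowance for the semaphore, fence and padding, one 4-dword write header per run of entries (a run is bounded by either 2k dwords or one page table, whichever is smaller), and two dwords, one 64-bit value, per PTE and per PDE. A sketch mirroring that arithmetic, with the block size assumed and the function name illustrative:

	#include <stdio.h>

	#define VM_BLOCK_SIZE 9	/* assumed, as elsewhere in this series */

	/* Rough upper bound of ring dwords needed to map nptes pages. */
	static unsigned estimate_ndw(unsigned nptes)
	{
		unsigned npdes = (nptes >> VM_BLOCK_SIZE) + 2;	/* +2: border overlap */
		unsigned ndw = 32;				/* sync, fence, padding */

		if (VM_BLOCK_SIZE > 11)
			ndw += (nptes >> 11) * 4;		/* header per 2k dwords */
		else
			ndw += (nptes >> VM_BLOCK_SIZE) * 4;	/* header per page table */

		ndw += nptes * 2;				/* 64-bit PTE values */
		ndw += (npdes >> 11) * 4;			/* PDE run headers */
		ndw += npdes * 2;				/* 64-bit PDE values */
		return ndw;
	}

	int main(void)
	{
		printf("mapping 1 MiB (256 pages): ~%u dwords\n", estimate_ndw(256));
		return 0;
	}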
 */
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va;
-	int r;
-
 	vm->id = 0;
 	vm->fence = NULL;
-	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-
-	/* map the ib pool buffer at 0 in virtual address space, set
-	 * read only
-	 */
-	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
-	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-				  RADEON_VM_PAGE_READABLE |
-				  RADEON_VM_PAGE_SNOOPED);
-	return r;
 }
 
 /**
@@ -1102,17 +1279,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo at this point non are busy any more because unbind
-	 * waited for the last vm fence to signal
-	 */
-	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-	if (!r) {
-		bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
-		list_del_init(&bo_va->bo_list);
-		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		kfree(bo_va);
-	}
 	if (!list_empty(&vm->va)) {
 		dev_err(rdev->dev, "still active bo inside vm\n");
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc..fe5c1f6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 				struct drm_gem_object **obj)
 {
 	struct radeon_bo *robj;
+	unsigned long max_size;
 	int r;
 
 	*obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
+
+	/* maximum bo size is the minimum of visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if (size > max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+		       __func__, __LINE__, size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
 	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
 			     NULL, &robj);
 	if (r) {
-		if (r != -ERESTARTSYS)
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
+		}
 		return r;
 	}
 	*obj = &robj->gem_base;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 83b8d8a..dc781c4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
+		struct radeon_bo_va *bo_va;
 		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		r = radeon_vm_init(rdev, &fpriv->vm);
+		radeon_vm_init(rdev, &fpriv->vm);
+
+		/* map the ib pool buffer read only into
+		 * virtual address space */
+		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+					 rdev->ring_tmp_bo.bo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+					  RADEON_VM_PAGE_READABLE |
+					  RADEON_VM_PAGE_SNOOPED);
 		if (r) {
 			radeon_vm_fini(rdev, &fpriv->vm);
 			kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 	/* new gpu have virtual
address space support */ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { struct radeon_fpriv *fpriv = file_priv->driver_priv; + struct radeon_bo_va *bo_va; + int r; + + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + bo_va = radeon_vm_bo_find(&fpriv->vm, + rdev->ring_tmp_bo.bo); + if (bo_va) + radeon_vm_bo_rmv(rdev, bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } radeon_vm_fini(rdev, &fpriv->vm); kfree(fpriv); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 92487e6..0063df9 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .disable = radeon_legacy_encoder_disable, }; -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - -static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) -{ - struct radeon_backlight_privdata *pdata = bl_get_data(bd); - uint8_t level; - - /* Convert brightness to hardware level */ - if (bd->props.brightness < 0) - level = 0; - else if (bd->props.brightness > RADEON_MAX_BL_LEVEL) - level = RADEON_MAX_BL_LEVEL; - else - level = bd->props.brightness; - - if (pdata->negative) - level = RADEON_MAX_BL_LEVEL - level; - - return level; -} - u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder) { @@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); } +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) + +static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) +{ + struct radeon_backlight_privdata *pdata = bl_get_data(bd); + uint8_t level; + + /* Convert brightness to hardware level */ + if (bd->props.brightness < 0) + level = 0; + else if (bd->props.brightness > RADEON_MAX_BL_LEVEL) + level = RADEON_MAX_BL_LEVEL; + else + level = bd->props.brightness; + + if (pdata->negative) + level = RADEON_MAX_BL_LEVEL - level; + + return level; +} + static int radeon_legacy_backlight_update_status(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); @@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, struct backlight_properties props; struct radeon_backlight_privdata *pdata; uint8_t backlight_level; + char bl_name[16]; if (!radeon_encoder->enc_priv) return; @@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, memset(&props, 0, sizeof(props)); props.max_brightness = RADEON_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; - bd = backlight_device_register("radeon_bl", &drm_connector->kdev, + snprintf(bl_name, sizeof(bl_name), + "radeon_bl%d", dev->primary->index); + bd = backlight_device_register(bl_name, &drm_connector->kdev, pdata, &radeon_backlight_ops, &props); if (IS_ERR(bd)) { DRM_ERROR("Backlight registration failed\n"); @@ -991,11 +994,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; - if (tmds) { - if (tmds->i2c_bus) - radeon_i2c_destroy(tmds->i2c_bus); - } + /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */ 
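The radeon_legacy_encoders.c hunk above switches from the fixed name "radeon_bl" to one derived from the DRM minor index, since registering a second backlight device under an already-used name fails on multi-GPU systems. A trivial standalone sketch of the naming pattern; the 16-byte buffer comfortably fits "radeon_bl" plus a minor number:

	#include <stdio.h>

	int main(void)
	{
		char bl_name[16];
		int minor = 1;	/* stand-in for dev->primary->index */

		/* snprintf never overruns the buffer and always NUL-terminates */
		snprintf(bl_name, sizeof(bl_name), "radeon_bl%d", minor);
		printf("%s\n", bl_name);
		return 0;
	}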
kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8b27dd6..b91118c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct radeon_bo *bo; enum ttm_bo_type type; unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; - unsigned long max_size = 0; size_t acc_size; int r; @@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev, } *bo_ptr = NULL; - /* maximun bo size is the minimun btw visible vram and gtt size */ - max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); - if ((page_align << PAGE_SHIFT) >= max_size) { - printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n", - __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20); - return -ENOMEM; - } - acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, sizeof(struct radeon_bo)); -retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -154,15 +144,6 @@ retry: acc_size, sg, &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) { - if (domain == RADEON_GEM_DOMAIN_VRAM) { - domain |= RADEON_GEM_DOMAIN_GTT; - goto retry; - } - dev_err(rdev->dev, - "object_init failed for (%lu, 0x%08X)\n", - size, domain); - } return r; } *bo_ptr = bo; diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index bba6690..47634f2 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v) { #if DRM_DEBUG_CODE if (ring->count_dw <= 0) { - DRM_ERROR("radeon: writting more dword to ring than expected !\n"); + DRM_ERROR("radeon: writing more dwords to the ring than expected!\n"); } #endif ring->ring[ring->wptr++] = v; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index f79633a..b0db712 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) WREG32(0x15DC, 0); /* empty context1-15 */ - /* FIXME start with 4G, once using 2 level pt switch to full - * vm size space - */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); + /* Assign the pt base to something valid for now; the pts used for + * the VMs are determined by the application and setup and assigned + * on the fly in the vm part of radeon_gart.c + */ for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), @@ -2807,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, { struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - int i; - uint64_t value; - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(1))); - radeon_ring_write(ring, pe); - radeon_ring_write(ring, upper_32_bits(pe)); - for (i = 0; i < count; ++i) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - addr += incr; - 
value |= r600_flags; - radeon_ring_write(ring, value); - radeon_ring_write(ring, upper_32_bits(value)); + while (count) { + unsigned ndw = 2 + count * 2; + if (ndw > 0x3FFE) + ndw = 0x3FFE; + + radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw)); + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(1))); + radeon_ring_write(ring, pe); + radeon_ring_write(ring, upper_32_bits(pe)); + for (; ndw > 2; ndw -= 2, --count, pe += 8) { + uint64_t value; + if (flags & RADEON_VM_PAGE_SYSTEM) { + value = radeon_vm_map_gart(rdev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & RADEON_VM_PAGE_VALID) + value = addr; + else + value = 0; + addr += incr; + value |= r600_flags; + radeon_ring_write(ring, value); + radeon_ring_write(ring, upper_32_bits(value)); + } } } @@ -2867,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm->id); + + /* sync PFP to ME, otherwise we might get invalid PFP reads */ + radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + radeon_ring_write(ring, 0x0); } /* diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index c71d493..1c350fc 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -201,6 +201,8 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags) goto done; } + platform_set_drvdata(pdev, sdev); + done: if (ret) shmob_drm_unload(dev); @@ -299,11 +301,9 @@ static struct drm_driver shmob_drm_driver = { #if CONFIG_PM_SLEEP static int shmob_drm_pm_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *ddev = platform_get_drvdata(pdev); - struct shmob_drm_device *sdev = ddev->dev_private; + struct shmob_drm_device *sdev = dev_get_drvdata(dev); - drm_kms_helper_poll_disable(ddev); + drm_kms_helper_poll_disable(sdev->ddev); shmob_drm_crtc_suspend(&sdev->crtc); return 0; @@ -311,9 +311,7 @@ static int shmob_drm_pm_suspend(struct device *dev) static int shmob_drm_pm_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *ddev = platform_get_drvdata(pdev); - struct shmob_drm_device *sdev = ddev->dev_private; + struct shmob_drm_device *sdev = dev_get_drvdata(dev); mutex_lock(&sdev->ddev->mode_config.mutex); shmob_drm_crtc_resume(&sdev->crtc); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 402ab69..bf6e4b5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -580,6 +580,7 @@ retry: if (unlikely(ret != 0)) return ret; +retry_reserve: spin_lock(&glob->lru_lock); if (unlikely(list_empty(&bo->ddestroy))) { @@ -587,14 +588,20 @@ retry: return 0; } - ret = ttm_bo_reserve_locked(bo, interruptible, - no_wait_reserve, false, 0); + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - if (unlikely(ret != 0)) { + if (unlikely(ret == -EBUSY)) { spin_unlock(&glob->lru_lock); - return ret; + if (likely(!no_wait_reserve)) + ret = ttm_bo_wait_unreserved(bo, interruptible); + if (unlikely(ret != 0)) + return ret; + + goto retry_reserve; } + BUG_ON(ret != 0); + /** * We can re-check for sync object without taking * the bo::lock since setting the sync object requires @@ -811,17 +818,14 @@ retry: no_wait_reserve, no_wait_gpu); kref_put(&bo->list_kref, ttm_bo_release_list); - if (likely(ret == 0 || ret == -ERESTARTSYS)) - return 
ret;
-
-	goto retry;
+	return ret;
 	}
 
-	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
 
-		if (likely(!no_wait_gpu))
+		if (likely(!no_wait_reserve))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
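The ttm_bo.c hunks above change both paths to the same shape: attempt the reservation with no_wait set while holding the LRU spinlock, and on -EBUSY drop the spinlock, block in ttm_bo_wait_unreserved(), and retry the whole lookup, so the code never sleeps with the spinlock held. A minimal userspace analogue of that pattern using a trylock plus condition variable; all names are illustrative, not the TTM API:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;	/* list lock */
	static pthread_mutex_t reserve  = PTHREAD_MUTEX_INITIALIZER;	/* bo reservation */
	static pthread_mutex_t ev_lock  = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  ev_unreserved = PTHREAD_COND_INITIALIZER;

	static void reserve_bo(void)
	{
	retry:
		pthread_mutex_lock(&lru_lock);
		if (pthread_mutex_trylock(&reserve) != 0) {
			/* busy: drop the list lock before sleeping, then retry */
			pthread_mutex_unlock(&lru_lock);
			pthread_mutex_lock(&ev_lock);
			pthread_cond_wait(&ev_unreserved, &ev_lock);
			pthread_mutex_unlock(&ev_lock);
			goto retry;
		}
		/* reserved; safe to drop the list lock and work on the object */
		pthread_mutex_unlock(&lru_lock);
	}

	static void unreserve_bo(void)
	{
		pthread_mutex_unlock(&reserve);
		pthread_cond_broadcast(&ev_unreserved);
	}

	int main(void)
	{
		reserve_bo();
		puts("reserved");
		unreserve_bo();
		return 0;
	}

The retry restarts the whole lookup rather than re-checking the same object because, after sleeping, the object may have been evicted or freed, so no state from before the wait can be trusted.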