-rw-r--r--  Documentation/EDID/1024x768.S | 44
-rw-r--r--  Documentation/EDID/1280x1024.S | 44
-rw-r--r--  Documentation/EDID/1680x1050.S | 44
-rw-r--r--  Documentation/EDID/1920x1080.S | 44
-rw-r--r--  Documentation/EDID/HOWTO.txt | 39
-rw-r--r--  Documentation/EDID/Makefile | 26
-rw-r--r--  Documentation/EDID/edid.S | 261
-rw-r--r--  Documentation/EDID/hex | 1
-rw-r--r--  Documentation/kernel-parameters.txt | 15
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 16
-rw-r--r--  drivers/char/agp/intel-agp.c | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 10
-rw-r--r--  drivers/gpu/drm/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/Makefile | 7
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 448
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 22
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 16
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 12
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 250
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 88
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 8
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 4
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 19
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 30
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 2
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 12
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 26
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 7
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 2
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 191
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 140
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 90
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 364
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 115
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 676
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.h | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1437
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.h | 50
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 57
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.h | 92
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 488
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 10
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 169
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 91
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 16
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 64
-rw-r--r--  drivers/gpu/drm/gma500/gem_glue.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_device.c | 691
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 1017
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.h | 79
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 618
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.h | 378
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 694
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h | 92
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 1180
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_output.c | 74
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_output.h | 77
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_tmd_vid.c | 201
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_tpo_vid.c | 124
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 13
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 18
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 211
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 401
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/power.c | 17
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 34
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 65
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 435
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 50
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_reg.h | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 62
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 829
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h | 38
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 324
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 164
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 528
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 205
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 275
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 197
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c | 387
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 132
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 388
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 210
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 210
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 275
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 314
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 150
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 809
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mxm.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 409
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 235
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 232
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fb.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_fb.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 397
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 213
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 336
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_i2c.c | 139
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1178
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 377
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 629
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 241
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1579
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_blit_common.h | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 90
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 24
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 24
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 20
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 4
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 5
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 72
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 55
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 60
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 8
-rw-r--r--  drivers/gpu/drm/udl/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/udl/Makefile | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 141
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 99
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 141
-rw-r--r--  drivers/gpu/drm/udl/udl_encoder.c | 80
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 611
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 227
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 338
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 414
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 253
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 55
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 252
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 91
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 38
-rw-r--r--  drivers/i2c/algos/i2c-algo-bit.c | 3
-rw-r--r--  drivers/video/fbmem.c | 18
-rw-r--r--  drivers/video/udlfb.c | 2
-rw-r--r--  include/drm/drm.h | 2
-rw-r--r--  include/drm/drmP.h | 25
-rw-r--r--  include/drm/drm_crtc.h | 48
-rw-r--r--  include/drm/drm_edid.h | 1
-rw-r--r--  include/drm/drm_fb_helper.h | 2
-rw-r--r--  include/drm/exynos_drm.h | 26
-rw-r--r--  include/drm/gma_drm.h | 2
-rw-r--r--  include/drm/i915_drm.h | 1
-rw-r--r--  include/drm/intel-gtt.h | 4
-rw-r--r--  include/drm/radeon_drm.h | 24
-rw-r--r--  include/linux/fb.h | 1
-rw-r--r--  include/linux/i2c-algo-bit.h | 1
-rw-r--r--  include/linux/i2c/tc35876x.h | 11
271 files changed, 22732 insertions, 6456 deletions
diff --git a/Documentation/EDID/1024x768.S b/Documentation/EDID/1024x768.S
new file mode 100644
index 0000000..4b486fe
--- /dev/null
+++ b/Documentation/EDID/1024x768.S
@@ -0,0 +1,44 @@
+/*
+ 1024x768.S: EDID data set for standard 1024x768 60 Hz monitor
+
+ Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+/* EDID */
+#define VERSION 1
+#define REVISION 3
+
+/* Display */
+#define CLOCK 65000 /* kHz */
+#define XPIX 1024
+#define YPIX 768
+#define XY_RATIO XY_RATIO_4_3
+#define XBLANK 320
+#define YBLANK 38
+#define XOFFSET 8
+#define XPULSE 144
+#define YOFFSET (63+3)
+#define YPULSE (63+6)
+#define DPI 72
+#define VFREQ 60 /* Hz */
+#define TIMING_NAME "Linux XGA"
+#define ESTABLISHED_TIMINGS_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
+#define HSYNC_POL 0
+#define VSYNC_POL 0
+#define CRC 0x55
+
+#include "edid.S"
diff --git a/Documentation/EDID/1280x1024.S b/Documentation/EDID/1280x1024.S
new file mode 100644
index 0000000..a2799fe
--- /dev/null
+++ b/Documentation/EDID/1280x1024.S
@@ -0,0 +1,44 @@
+/*
+ 1280x1024.S: EDID data set for standard 1280x1024 60 Hz monitor
+
+ Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+/* EDID */
+#define VERSION 1
+#define REVISION 3
+
+/* Display */
+#define CLOCK 108000 /* kHz */
+#define XPIX 1280
+#define YPIX 1024
+#define XY_RATIO XY_RATIO_5_4
+#define XBLANK 408
+#define YBLANK 42
+#define XOFFSET 48
+#define XPULSE 112
+#define YOFFSET (63+1)
+#define YPULSE (63+3)
+#define DPI 72
+#define VFREQ 60 /* Hz */
+#define TIMING_NAME "Linux SXGA"
+#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
+#define HSYNC_POL 1
+#define VSYNC_POL 1
+#define CRC 0xa0
+
+#include "edid.S"
diff --git a/Documentation/EDID/1680x1050.S b/Documentation/EDID/1680x1050.S
new file mode 100644
index 0000000..96f67ca
--- /dev/null
+++ b/Documentation/EDID/1680x1050.S
@@ -0,0 +1,44 @@
+/*
+ 1680x1050.S: EDID data set for standard 1680x1050 60 Hz monitor
+
+ Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+/* EDID */
+#define VERSION 1
+#define REVISION 3
+
+/* Display */
+#define CLOCK 146250 /* kHz */
+#define XPIX 1680
+#define YPIX 1050
+#define XY_RATIO XY_RATIO_16_10
+#define XBLANK 560
+#define YBLANK 39
+#define XOFFSET 104
+#define XPULSE 176
+#define YOFFSET (63+3)
+#define YPULSE (63+6)
+#define DPI 96
+#define VFREQ 60 /* Hz */
+#define TIMING_NAME "Linux WSXGA"
+#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
+#define HSYNC_POL 1
+#define VSYNC_POL 1
+#define CRC 0x26
+
+#include "edid.S"
diff --git a/Documentation/EDID/1920x1080.S b/Documentation/EDID/1920x1080.S
new file mode 100644
index 0000000..36ed5d5
--- /dev/null
+++ b/Documentation/EDID/1920x1080.S
@@ -0,0 +1,44 @@
+/*
+ 1920x1080.S: EDID data set for standard 1920x1080 60 Hz monitor
+
+ Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+/* EDID */
+#define VERSION 1
+#define REVISION 3
+
+/* Display */
+#define CLOCK 148500 /* kHz */
+#define XPIX 1920
+#define YPIX 1080
+#define XY_RATIO XY_RATIO_16_9
+#define XBLANK 280
+#define YBLANK 45
+#define XOFFSET 88
+#define XPULSE 44
+#define YOFFSET (63+4)
+#define YPULSE (63+5)
+#define DPI 96
+#define VFREQ 60 /* Hz */
+#define TIMING_NAME "Linux FHD"
+#define ESTABLISHED_TIMINGS_BITS 0x00 /* none */
+#define HSYNC_POL 1
+#define VSYNC_POL 1
+#define CRC 0x05
+
+#include "edid.S"
diff --git a/Documentation/EDID/HOWTO.txt b/Documentation/EDID/HOWTO.txt
new file mode 100644
index 0000000..75a9f2a
--- /dev/null
+++ b/Documentation/EDID/HOWTO.txt
@@ -0,0 +1,39 @@
+In the good old days when graphics parameters were configured explicitly
+in a file called xorg.conf, even broken hardware could be managed.
+
+Today, with the advent of Kernel Mode Setting, a graphics board is
+either correctly working because all components follow the standards -
+or the computer is unusable, because the screen remains dark after
+booting or it displays the wrong area. Cases when this happens are:
+- The graphics board does not recognize the monitor.
+- The graphics board is unable to detect any EDID data.
+- The graphics board incorrectly forwards EDID data to the driver.
+- The monitor sends no or bogus EDID data.
+- A KVM sends its own EDID data instead of querying the connected monitor.
+Adding the kernel parameter "nomodeset" helps in most cases, but causes
+restrictions later on.
+
+As a remedy for such situations, the kernel configuration item
+CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows providing an
+individually prepared or corrected EDID data set in the /lib/firmware
+directory from where it is loaded via the firmware interface. The code
+(see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for
+commonly used screen resolutions (1024x768, 1280x1024, 1680x1050,
+1920x1080) as binary blobs, but the kernel source tree does not contain
+code to create these data. In order to elucidate the origin of the
+built-in binary EDID blobs and to facilitate the creation of individual
+data for a specific misbehaving monitor, commented sources and a
+Makefile environment are given here.
+
+To create binary EDID and C source code files from the existing data
+material, simply type "make".
+
+If you want to create your own EDID file, copy the file 1024x768.S and
+replace the settings with your own data. The CRC value in the last line
+ #define CRC 0x55
+is a bit tricky. After a first version of the binary data set is
+created, it must be checked with the "edid-decode" utility, which will
+most probably complain about a wrong CRC. Fortunately, the utility also
+displays the correct CRC which must then be inserted into the source
+file. After the make procedure is repeated, the EDID data set is ready
+to be used.
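
The CRC step in the HOWTO above boils down to a simple rule: the 128 bytes
of an EDID base block must sum to zero modulo 256, so the final byte has to
cancel the sum of the first 127. As a minimal sketch (not part of this
patch; the default file name is just an example), the expected checksum
byte of a freshly generated .bin can also be computed with a few lines of C:

	/* Sketch: print the checksum byte a 128-byte EDID base block needs. */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		const char *path = argc > 1 ? argv[1] : "1024x768.bin";
		unsigned char buf[128];
		unsigned int sum = 0;
		FILE *f = fopen(path, "rb");
		int i;

		if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
			perror(path);
			return EXIT_FAILURE;
		}
		fclose(f);

		for (i = 0; i < 127; i++)
			sum += buf[i];	/* the checksum byte itself is excluded */

		printf("checksum byte should be 0x%02x (file has 0x%02x)\n",
		       (256 - (sum & 0xff)) & 0xff, buf[127]);
		return 0;
	}

The value it prints is the same one edid-decode reports as the expected
checksum and is what belongs in the "#define CRC" line of the source file.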
diff --git a/Documentation/EDID/Makefile b/Documentation/EDID/Makefile
new file mode 100644
index 0000000..17763ca
--- /dev/null
+++ b/Documentation/EDID/Makefile
@@ -0,0 +1,26 @@
+
+SOURCES := $(wildcard [0-9]*x[0-9]*.S)
+
+BIN := $(patsubst %.S, %.bin, $(SOURCES))
+
+IHEX := $(patsubst %.S, %.bin.ihex, $(SOURCES))
+
+CODE := $(patsubst %.S, %.c, $(SOURCES))
+
+all: $(BIN) $(IHEX) $(CODE)
+
+clean:
+ @rm -f *.o *.bin.ihex *.bin *.c
+
+%.o: %.S
+ @cc -c $^
+
+%.bin: %.o
+ @objcopy -Obinary $^ $@
+
+%.bin.ihex: %.o
+ @objcopy -Oihex $^ $@
+ @dos2unix $@ 2>/dev/null
+
+%.c: %.bin
+ @echo "{" >$@; hexdump -f hex $^ >>$@; echo "};" >>$@
diff --git a/Documentation/EDID/edid.S b/Documentation/EDID/edid.S
new file mode 100644
index 0000000..ea97ae2
--- /dev/null
+++ b/Documentation/EDID/edid.S
@@ -0,0 +1,261 @@
+/*
+ edid.S: EDID data template
+
+ Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+
+/* Manufacturer */
+#define MFG_LNX1 'L'
+#define MFG_LNX2 'N'
+#define MFG_LNX3 'X'
+#define SERIAL 0
+#define YEAR 2012
+#define WEEK 5
+
+/* EDID 1.3 standard definitions */
+#define XY_RATIO_16_10 0b00
+#define XY_RATIO_4_3 0b01
+#define XY_RATIO_5_4 0b10
+#define XY_RATIO_16_9 0b11
+
+#define mfgname2id(v1,v2,v3) \
+ ((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f))
+#define swap16(v1) ((v1>>8)+((v1&0xff)<<8))
+#define msbs2(v1,v2) ((((v1>>8)&0x0f)<<4)+((v2>>8)&0x0f))
+#define msbs4(v1,v2,v3,v4) \
+ (((v1&0x03)>>2)+((v2&0x03)>>4)+((v3&0x03)>>6)+((v4&0x03)>>8))
+#define pixdpi2mm(pix,dpi) ((pix*25)/dpi)
+#define xsize pixdpi2mm(XPIX,DPI)
+#define ysize pixdpi2mm(YPIX,DPI)
+
+ .data
+
+/* Fixed header pattern */
+header: .byte 0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x00
+
+mfg_id: .word swap16(mfgname2id(MFG_LNX1, MFG_LNX2, MFG_LNX3))
+
+prod_code: .word 0
+
+/* Serial number. 32 bits, little endian. */
+serial_number: .long SERIAL
+
+/* Week of manufacture */
+week: .byte WEEK
+
+/* Year of manufacture, less 1990. (1990-2245)
+ If week=255, it is the model year instead */
+year: .byte YEAR-1990
+
+version: .byte VERSION /* EDID version, usually 1 (for 1.3) */
+revision: .byte REVISION /* EDID revision, usually 3 (for 1.3) */
+
+/* If Bit 7=1 Digital input. If set, the following bit definitions apply:
+ Bits 6-1 Reserved, must be 0
+ Bit 0 Signal is compatible with VESA DFP 1.x TMDS CRGB,
+ 1 pixel per clock, up to 8 bits per color, MSB aligned,
+ If Bit 7=0 Analog input. If clear, the following bit definitions apply:
+ Bits 6-5 Video white and sync levels, relative to blank
+ 00=+0.7/-0.3 V; 01=+0.714/-0.286 V;
+ 10=+1.0/-0.4 V; 11=+0.7/0 V
+ Bit 4 Blank-to-black setup (pedestal) expected
+ Bit 3 Separate sync supported
+ Bit 2 Composite sync (on HSync) supported
+ Bit 1 Sync on green supported
+ Bit 0 VSync pulse must be serrated when composite or
+ sync-on-green is used. */
+video_parms: .byte 0x6d
+
+/* Maximum horizontal image size, in centimetres
+ (max 292 cm/115 in at 16:9 aspect ratio) */
+max_hor_size: .byte xsize/10
+
+/* Maximum vertical image size, in centimetres.
+ If either byte is 0, undefined (e.g. projector) */
+max_vert_size: .byte ysize/10
+
+/* Display gamma, minus 1, times 100 (range 1.00-3.54) */
+gamma: .byte 120
+
+/* Bit 7 DPMS standby supported
+ Bit 6 DPMS suspend supported
+ Bit 5 DPMS active-off supported
+ Bits 4-3 Display type: 00=monochrome; 01=RGB colour;
+ 10=non-RGB multicolour; 11=undefined
+ Bit 2 Standard sRGB colour space. Bytes 25-34 must contain
+ sRGB standard values.
+ Bit 1 Preferred timing mode specified in descriptor block 1.
+ Bit 0 GTF supported with default parameter values. */
+dsp_features: .byte 0xea
+
+/* Chromaticity coordinates. */
+/* Red and green least-significant bits
+ Bits 7-6 Red x value least-significant 2 bits
+ Bits 5-4 Red y value least-significant 2 bits
+ Bits 3-2 Green x value least-significant 2 bits
+ Bits 1-0 Green y value least-significant 2 bits */
+red_green_lsb: .byte 0x5e
+
+/* Blue and white least-significant 2 bits */
+blue_white_lsb: .byte 0xc0
+
+/* Red x value most significant 8 bits.
+ 0-255 encodes 0-0.996 (255/256); 0-0.999 (1023/1024) with lsbits */
+red_x_msb: .byte 0xa4
+
+/* Red y value most significant 8 bits */
+red_y_msb: .byte 0x59
+
+/* Green x and y value most significant 8 bits */
+green_x_y_msb: .byte 0x4a,0x98
+
+/* Blue x and y value most significant 8 bits */
+blue_x_y_msb: .byte 0x25,0x20
+
+/* Default white point x and y value most significant 8 bits */
+white_x_y_msb: .byte 0x50,0x54
+
+/* Established timings */
+/* Bit 7 720x400 @ 70 Hz
+ Bit 6 720x400 @ 88 Hz
+ Bit 5 640x480 @ 60 Hz
+ Bit 4 640x480 @ 67 Hz
+ Bit 3 640x480 @ 72 Hz
+ Bit 2 640x480 @ 75 Hz
+ Bit 1 800x600 @ 56 Hz
+ Bit 0 800x600 @ 60 Hz */
+estbl_timing1: .byte 0x00
+
+/* Bit 7 800x600 @ 72 Hz
+ Bit 6 800x600 @ 75 Hz
+ Bit 5 832x624 @ 75 Hz
+ Bit 4 1024x768 @ 87 Hz, interlaced (1024x768)
+ Bit 3 1024x768 @ 60 Hz
+ Bit 2 1024x768 @ 72 Hz
+ Bit 1 1024x768 @ 75 Hz
+ Bit 0 1280x1024 @ 75 Hz */
+estbl_timing2: .byte ESTABLISHED_TIMINGS_BITS
+
+/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II)
+ Bits 6-0 Other manufacturer-specific display modes */
+estbl_timing3: .byte 0x00
+
+/* Standard timing */
+/* X resolution, less 31, divided by 8 (256-2288 pixels) */
+std_xres: .byte (XPIX/8)-31
+/* Y resolution, X:Y pixel ratio
+ Bits 7-6 X:Y pixel ratio: 00=16:10; 01=4:3; 10=5:4; 11=16:9.
+ Bits 5-0 Vertical frequency, less 60 (60-123 Hz) */
+std_vres: .byte (XY_RATIO<<6)+VFREQ-60
+ .fill 7,2,0x0101 /* Unused */
+
+descriptor1:
+/* Pixel clock in 10 kHz units. (0.-655.35 MHz, little-endian) */
+clock: .word CLOCK/10
+
+/* Horizontal active pixels 8 lsbits (0-4095) */
+x_act_lsb: .byte XPIX&0xff
+/* Horizontal blanking pixels 8 lsbits (0-4095)
+ End of active to start of next active. */
+x_blk_lsb: .byte XBLANK&0xff
+/* Bits 7-4 Horizontal active pixels 4 msbits
+ Bits 3-0 Horizontal blanking pixels 4 msbits */
+x_msbs: .byte msbs2(XPIX,XBLANK)
+
+/* Vertical active lines 8 lsbits (0-4095) */
+y_act_lsb: .byte YPIX&0xff
+/* Vertical blanking lines 8 lsbits (0-4095) */
+y_blk_lsb: .byte YBLANK&0xff
+/* Bits 7-4 Vertical active lines 4 msbits
+ Bits 3-0 Vertical blanking lines 4 msbits */
+y_msbs: .byte msbs2(YPIX,YBLANK)
+
+/* Horizontal sync offset pixels 8 lsbits (0-1023) From blanking start */
+x_snc_off_lsb: .byte XOFFSET&0xff
+/* Horizontal sync pulse width pixels 8 lsbits (0-1023) */
+x_snc_pls_lsb: .byte XPULSE&0xff
+/* Bits 7-4 Vertical sync offset lines 4 lsbits (0-63)
+ Bits 3-0 Vertical sync pulse width lines 4 lsbits (0-63) */
+y_snc_lsb: .byte ((YOFFSET-63)<<4)+(YPULSE-63)
+/* Bits 7-6 Horizontal sync offset pixels 2 msbits
+ Bits 5-4 Horizontal sync pulse width pixels 2 msbits
+ Bits 3-2 Vertical sync offset lines 2 msbits
+ Bits 1-0 Vertical sync pulse width lines 2 msbits */
+xy_snc_msbs: .byte msbs4(XOFFSET,XPULSE,YOFFSET,YPULSE)
+
+/* Horizontal display size, mm, 8 lsbits (0-4095 mm, 161 in) */
+x_dsp_size: .byte xsize&0xff
+
+/* Vertical display size, mm, 8 lsbits (0-4095 mm, 161 in) */
+y_dsp_size: .byte ysize&0xff
+
+/* Bits 7-4 Horizontal display size, mm, 4 msbits
+ Bits 3-0 Vertical display size, mm, 4 msbits */
+dsp_size_mbsb: .byte msbs2(xsize,ysize)
+
+/* Horizontal border pixels (each side; total is twice this) */
+x_border: .byte 0
+/* Vertical border lines (each side; total is twice this) */
+y_border: .byte 0
+
+/* Bit 7 Interlaced
+ Bits 6-5 Stereo mode: 00=No stereo; other values depend on bit 0:
+ Bit 0=0: 01=Field sequential, sync=1 during right; 10=similar,
+ sync=1 during left; 11=4-way interleaved stereo
+ Bit 0=1 2-way interleaved stereo: 01=Right image on even lines;
+ 10=Left image on even lines; 11=side-by-side
+ Bits 4-3 Sync type: 00=Analog composite; 01=Bipolar analog composite;
+ 10=Digital composite (on HSync); 11=Digital separate
+ Bit 2 If digital separate: Vertical sync polarity (1=positive)
+ Other types: VSync serrated (HSync during VSync)
+ Bit 1 If analog sync: Sync on all 3 RGB lines (else green only)
+ Digital: HSync polarity (1=positive)
+ Bit 0 2-way line-interleaved stereo, if bits 4-3 are not 00. */
+features: .byte 0x18+(VSYNC_POL<<2)+(HSYNC_POL<<1)
+
+descriptor2: .byte 0,0 /* Not a detailed timing descriptor */
+ .byte 0 /* Must be zero */
+ .byte 0xff /* Descriptor is monitor serial number (text) */
+ .byte 0 /* Must be zero */
+start1: .ascii "Linux #0"
+end1: .byte 0x0a /* End marker */
+ .fill 12-(end1-start1), 1, 0x20 /* Padded spaces */
+descriptor3: .byte 0,0 /* Not a detailed timing descriptor */
+ .byte 0 /* Must be zero */
+ .byte 0xfd /* Descriptor is monitor range limits */
+ .byte 0 /* Must be zero */
+start2: .byte VFREQ-1 /* Minimum vertical field rate (1-255 Hz) */
+ .byte VFREQ+1 /* Maximum vertical field rate (1-255 Hz) */
+ .byte (CLOCK/(XPIX+XBLANK))-1 /* Minimum horizontal line rate
+ (1-255 kHz) */
+ .byte (CLOCK/(XPIX+XBLANK))+1 /* Maximum horizontal line rate
+ (1-255 kHz) */
+ .byte (CLOCK/10000)+1 /* Maximum pixel clock rate, rounded up
+ to 10 MHz multiple (10-2550 MHz) */
+ .byte 0 /* No extended timing information type */
+end2: .byte 0x0a /* End marker */
+ .fill 12-(end2-start2), 1, 0x20 /* Padded spaces */
+descriptor4: .byte 0,0 /* Not a detailed timing descriptor */
+ .byte 0 /* Must be zero */
+ .byte 0xfc /* Descriptor is text */
+ .byte 0 /* Must be zero */
+start3: .ascii TIMING_NAME
+end3: .byte 0x0a /* End marker */
+ .fill 12-(end3-start3), 1, 0x20 /* Padded spaces */
+extensions: .byte 0 /* Number of extensions to follow */
+checksum: .byte CRC /* Sum of all bytes must be 0 */
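
The comments in edid.S above spell out how each 12-bit active/blanking
value is split into an 8-bit LSB byte plus a shared byte of MSB nibbles
(the msbs2() packing). As a sketch only (not part of this patch; the byte
offsets follow the descriptor layout documented above), this is how a
consumer would reassemble those fields from an 18-byte detailed timing
descriptor:

	/* Invert the lsb/msbs2 packing of descriptor1: d[0..1] is the pixel
	 * clock in 10 kHz units (little endian), d[2]/d[3] the horizontal
	 * active/blanking LSBs with their MSB nibbles in d[4], and d[5..7]
	 * the same for the vertical values.
	 */
	struct dtd_timing {
		unsigned int clock_khz;
		unsigned int hactive, hblank;
		unsigned int vactive, vblank;
	};

	static void dtd_decode(const unsigned char *d, struct dtd_timing *t)
	{
		t->clock_khz = (d[0] | (d[1] << 8)) * 10;
		t->hactive   = d[2] | ((d[4] & 0xf0) << 4);
		t->hblank    = d[3] | ((d[4] & 0x0f) << 8);
		t->vactive   = d[5] | ((d[7] & 0xf0) << 4);
		t->vblank    = d[6] | ((d[7] & 0x0f) << 8);
	}

For the 1024x768 data set above this yields a 65000 kHz clock, 1024/320
active/blanking pixels and 768/38 active/blanking lines, matching the
#defines in 1024x768.S.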
diff --git a/Documentation/EDID/hex b/Documentation/EDID/hex
new file mode 100644
index 0000000..8873ebb
--- /dev/null
+++ b/Documentation/EDID/hex
@@ -0,0 +1 @@
+"\t" 8/1 "0x%02x, " "\n"
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7986d79..247dcfd 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -713,6 +713,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The filter can be disabled or changed to another
driver later using sysfs.
+ drm_kms_helper.edid_firmware=[<connector>:]<file>
+ Broken monitors, graphic adapters and KVMs may
+ send no or incorrect EDID data sets. This parameter
+ allows specifying an EDID data set in the
+ /lib/firmware directory that is used instead.
+ Generic built-in EDID data sets are used if one of
+ edid/1024x768.bin, edid/1280x1024.bin,
+ edid/1680x1050.bin, or edid/1920x1080.bin is given
+ and no file with the same name exists. Details and
+ instructions on how to build your own EDID data are
+ available in Documentation/EDID/HOWTO.txt. An EDID
+ data set will only be used for a particular connector
+ if its name and a colon are prepended to the EDID
+ name.
+
dscc4.setup= [NET]
earlycon= [KNL] Output early console device and options.
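
As a usage illustration of the parameter documented above (the connector
name below is hypothetical and depends on what the DRM driver registers),
overriding one output with the built-in 1280x1024 data set would look like
this on the kernel command line:

	drm_kms_helper.edid_firmware=DVI-I-1:edid/1280x1024.bin

Dropping the "DVI-I-1:" prefix applies the EDID override to every
connector, as described above.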
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 721e652..e0a3723 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -28,6 +28,8 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mfd/intel_msic.h>
+#include <linux/gpio.h>
+#include <linux/i2c/tc35876x.h>
#include <asm/setup.h>
#include <asm/mpspec_def.h>
@@ -675,6 +677,19 @@ static void *msic_thermal_platform_data(void *info)
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
}
+/* tc35876x DSI-LVDS bridge chip and panel platform data */
+static void *tc35876x_platform_data(void *data)
+{
+ static struct tc35876x_platform_data pdata;
+
+ /* gpio pins set to -1 will not be used by the driver */
+ pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
+ pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
+ pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
+
+ return &pdata;
+}
+
static const struct devs_id __initconst device_ids[] = {
{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
@@ -687,6 +702,7 @@ static const struct devs_id __initconst device_ids[] = {
{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
{"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
+ {"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data},
/* MSIC subdevices */
{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b427711..962e75d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -850,6 +850,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
}
+ ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
ID(PCI_DEVICE_ID_INTEL_82443LX_0),
ID(PCI_DEVICE_ID_INTEL_82443BX_0),
ID(PCI_DEVICE_ID_INTEL_82443GX_0),
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c92424c..5cf47ac 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -76,7 +76,6 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
- dma_addr_t scratch_page_dma;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -306,9 +305,9 @@ static int intel_gtt_setup_scratch_page(void)
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
- intel_private.scratch_page_dma = dma_addr;
+ intel_private.base.scratch_page_dma = dma_addr;
} else
- intel_private.scratch_page_dma = page_to_phys(page);
+ intel_private.base.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@@ -631,7 +630,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
- pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+ pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@@ -681,6 +680,7 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
+ intel_private.base.gtt = intel_private.gtt;
global_cache_flush(); /* FIXME: ? */
@@ -975,7 +975,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
- intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2418429..87ca18b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+config DRM_USB
+ tristate
+ depends on DRM
+ select USB
+
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -27,6 +32,18 @@ config DRM_KMS_HELPER
help
FB and CRTC helpers for KMS drivers.
+config DRM_LOAD_EDID_FIRMWARE
+ bool "Allow to specify an EDID data set instead of probing for it"
+ depends on DRM_KMS_HELPER
+ help
+ Say Y here if you want EDID data to be loaded from the
+ /lib/firmware directory or one of the provided built-in
+ data sets. This may be necessary if the graphics adapter or
+ monitor is unable to provide appropriate EDID data. Since this
+ feature is provided as a workaround for broken hardware, the
+ default case is N. Details and instructions on how to build your
+ own EDID data are given in Documentation/EDID/HOWTO.txt.
+
config DRM_TTM
tristate
depends on DRM
@@ -165,3 +182,4 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
+source "drivers/gpu/drm/udl/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0cde1b8..a858532 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,17 +12,21 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o drm_usb.o
+ drm_trace_points.o drm_global.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-usb-y := drm_usb.o
+
drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_USB) += drm_usb.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
@@ -37,4 +41,5 @@ obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
+obj-$(CONFIG_DRM_UDL) += udl/
obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5e818a8..d3aaeb6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,11 +38,6 @@
#include "drm_edid.h"
#include "drm_fourcc.h"
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
-
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
@@ -298,9 +293,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
int ret;
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
- if (ret) {
+ if (ret)
return ret;
- }
fb->dev = dev;
fb->funcs = funcs;
@@ -370,19 +364,31 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* Caller must hold mode config lock.
*
* Inits a new object created as base part of an driver crtc object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
*/
-void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
+ int ret;
+
crtc->dev = dev;
crtc->funcs = funcs;
mutex_lock(&dev->mode_config.mutex);
- drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+ goto out;
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
+
+ out:
mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
@@ -442,7 +448,7 @@ void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_del(&mode->head);
- kfree(mode);
+ drm_mode_destroy(connector->dev, mode);
}
EXPORT_SYMBOL(drm_mode_remove);
@@ -454,21 +460,29 @@ EXPORT_SYMBOL(drm_mode_remove);
* @name: user visible name of the connector
*
* LOCKING:
- * Caller must hold @dev's mode_config lock.
+ * Takes mode config lock.
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
*/
-void drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
{
+ int ret;
+
mutex_lock(&dev->mode_config.mutex);
+ ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ if (ret)
+ goto out;
+
connector->dev = dev;
connector->funcs = funcs;
- drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
@@ -488,7 +502,10 @@ void drm_connector_init(struct drm_device *dev,
drm_connector_attach_property(connector,
dev->mode_config.dpms_property, 0);
+ out:
mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
}
EXPORT_SYMBOL(drm_connector_init);
@@ -497,7 +514,7 @@ EXPORT_SYMBOL(drm_connector_init);
* @connector: connector to cleanup
*
* LOCKING:
- * Caller must hold @dev's mode_config lock.
+ * Takes mode config lock.
*
* Cleans up the connector but doesn't free the object.
*/
@@ -523,23 +540,41 @@ void drm_connector_cleanup(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_cleanup);
-void drm_encoder_init(struct drm_device *dev,
+void drm_connector_unplug_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ /* taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+
+}
+EXPORT_SYMBOL(drm_connector_unplug_all);
+
+int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type)
{
+ int ret;
+
mutex_lock(&dev->mode_config.mutex);
- encoder->dev = dev;
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out;
- drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
+ out:
mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
@@ -560,18 +595,23 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
const uint32_t *formats, uint32_t format_count,
bool priv)
{
+ int ret;
+
mutex_lock(&dev->mode_config.mutex);
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ goto out;
+
plane->dev = dev;
- drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
- mutex_unlock(&dev->mode_config.mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
@@ -589,9 +629,10 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
INIT_LIST_HEAD(&plane->head);
}
+ out:
mutex_unlock(&dev->mode_config.mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(drm_plane_init);
@@ -631,7 +672,11 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev)
if (!nmode)
return NULL;
- drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ kfree(nmode);
+ return NULL;
+ }
+
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
@@ -648,6 +693,9 @@ EXPORT_SYMBOL(drm_mode_create);
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
+ if (!mode)
+ return;
+
drm_mode_object_put(dev, &mode->base);
kfree(mode);
@@ -658,7 +706,6 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
- int i;
/*
* Standard properties (apply to all connectors)
@@ -668,11 +715,9 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
"EDID", 0);
dev->mode_config.edid_property = edid;
- dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
- drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
- drm_dpms_enum_list[i].name);
+ dpms = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
return 0;
@@ -688,30 +733,21 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
{
struct drm_property *dvi_i_selector;
struct drm_property *dvi_i_subconnector;
- int i;
if (dev->mode_config.dvi_i_select_subconnector_property)
return 0;
dvi_i_selector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ drm_property_create_enum(dev, 0,
"select subconnector",
+ drm_dvi_i_select_enum_list,
ARRAY_SIZE(drm_dvi_i_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
- drm_property_add_enum(dvi_i_selector, i,
- drm_dvi_i_select_enum_list[i].type,
- drm_dvi_i_select_enum_list[i].name);
dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
- dvi_i_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
+ drm_dvi_i_subconnector_enum_list,
ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
- drm_property_add_enum(dvi_i_subconnector, i,
- drm_dvi_i_subconnector_enum_list[i].type,
- drm_dvi_i_subconnector_enum_list[i].name);
dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
return 0;
@@ -742,51 +778,33 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
/*
* Basic connector properties
*/
- tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ tv_selector = drm_property_create_enum(dev, 0,
"select subconnector",
+ drm_tv_select_enum_list,
ARRAY_SIZE(drm_tv_select_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
- drm_property_add_enum(tv_selector, i,
- drm_tv_select_enum_list[i].type,
- drm_tv_select_enum_list[i].name);
dev->mode_config.tv_select_subconnector_property = tv_selector;
tv_subconnector =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE, "subconnector",
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
ARRAY_SIZE(drm_tv_subconnector_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
- drm_property_add_enum(tv_subconnector, i,
- drm_tv_subconnector_enum_list[i].type,
- drm_tv_subconnector_enum_list[i].name);
dev->mode_config.tv_subconnector_property = tv_subconnector;
/*
* Other, TV specific properties: margins & TV modes.
*/
dev->mode_config.tv_left_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left margin", 2);
- dev->mode_config.tv_left_margin_property->values[0] = 0;
- dev->mode_config.tv_left_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
dev->mode_config.tv_right_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right margin", 2);
- dev->mode_config.tv_right_margin_property->values[0] = 0;
- dev->mode_config.tv_right_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
dev->mode_config.tv_top_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top margin", 2);
- dev->mode_config.tv_top_margin_property->values[0] = 0;
- dev->mode_config.tv_top_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
dev->mode_config.tv_bottom_margin_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom margin", 2);
- dev->mode_config.tv_bottom_margin_property->values[0] = 0;
- dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
dev->mode_config.tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
@@ -796,40 +814,22 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
i, modes[i]);
dev->mode_config.tv_brightness_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "brightness", 2);
- dev->mode_config.tv_brightness_property->values[0] = 0;
- dev->mode_config.tv_brightness_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
dev->mode_config.tv_contrast_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "contrast", 2);
- dev->mode_config.tv_contrast_property->values[0] = 0;
- dev->mode_config.tv_contrast_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
dev->mode_config.tv_flicker_reduction_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "flicker reduction", 2);
- dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
- dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
dev->mode_config.tv_overscan_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "overscan", 2);
- dev->mode_config.tv_overscan_property->values[0] = 0;
- dev->mode_config.tv_overscan_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
dev->mode_config.tv_saturation_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "saturation", 2);
- dev->mode_config.tv_saturation_property->values[0] = 0;
- dev->mode_config.tv_saturation_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
dev->mode_config.tv_hue_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "hue", 2);
- dev->mode_config.tv_hue_property->values[0] = 0;
- dev->mode_config.tv_hue_property->values[1] = 100;
+ drm_property_create_range(dev, 0, "hue", 0, 100);
return 0;
}
@@ -845,18 +845,14 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
struct drm_property *scaling_mode;
- int i;
if (dev->mode_config.scaling_mode_property)
return 0;
scaling_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
ARRAY_SIZE(drm_scaling_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
- drm_property_add_enum(scaling_mode, i,
- drm_scaling_mode_enum_list[i].type,
- drm_scaling_mode_enum_list[i].name);
dev->mode_config.scaling_mode_property = scaling_mode;
@@ -874,18 +870,14 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
int drm_mode_create_dithering_property(struct drm_device *dev)
{
struct drm_property *dithering_mode;
- int i;
if (dev->mode_config.dithering_mode_property)
return 0;
dithering_mode =
- drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
+ drm_property_create_enum(dev, 0, "dithering",
+ drm_dithering_mode_enum_list,
ARRAY_SIZE(drm_dithering_mode_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
- drm_property_add_enum(dithering_mode, i,
- drm_dithering_mode_enum_list[i].type,
- drm_dithering_mode_enum_list[i].name);
dev->mode_config.dithering_mode_property = dithering_mode;
return 0;
@@ -902,20 +894,15 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
int drm_mode_create_dirty_info_property(struct drm_device *dev)
{
struct drm_property *dirty_info;
- int i;
if (dev->mode_config.dirty_info_property)
return 0;
dirty_info =
- drm_property_create(dev, DRM_MODE_PROP_ENUM |
- DRM_MODE_PROP_IMMUTABLE,
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"dirty",
+ drm_dirty_info_enum_list,
ARRAY_SIZE(drm_dirty_info_enum_list));
- for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
- drm_property_add_enum(dirty_info, i,
- drm_dirty_info_enum_list[i].type,
- drm_dirty_info_enum_list[i].name);
dev->mode_config.dirty_info_property = dirty_info;
return 0;
@@ -999,6 +986,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
return 0;
}
+EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
/**
* drm_mode_config_cleanup - free up DRM mode_config info
@@ -1048,6 +1036,9 @@ void drm_mode_config_cleanup(struct drm_device *dev)
head) {
plane->funcs->destroy(plane);
}
+
+ idr_remove_all(&dev->mode_config.crtc_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -1062,9 +1053,16 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
-void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
- struct drm_display_mode *in)
+static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in)
{
+ WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+ in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+ in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+ in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+ in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
+ "timing values too large for mode info\n");
+
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1093,10 +1091,16 @@ void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
*/
-void drm_crtc_convert_umode(struct drm_display_mode *out,
- struct drm_mode_modeinfo *in)
+static int drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
{
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return -ERANGE;
+
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1113,6 +1117,8 @@ void drm_crtc_convert_umode(struct drm_display_mode *out,
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+ return 0;
}
/**
@@ -1311,7 +1317,7 @@ out:
* @arg: arg from ioctl
*
* LOCKING:
- * Caller? (FIXME)
+ * Takes mode config lock.
*
* Construct a CRTC configuration structure to return to the user.
*
@@ -1371,7 +1377,7 @@ out:
* @arg: arg from ioctl
*
* LOCKING:
- * Caller? (FIXME)
+ * Takes mode config lock.
*
* Construct a connector configuration structure to return to the user.
*
@@ -1553,6 +1559,9 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
+ * LOCKING:
+ * Takes mode config lock.
+ *
* Return an plane count and set of IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
@@ -1599,6 +1608,9 @@ out:
* @data: ioctl data
* @file_priv: DRM file info
*
+ * LOCKING:
+ * Takes mode config lock.
+ *
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
@@ -1664,6 +1676,9 @@ out:
* @data: ioctl data*
* @file_prive: DRM file info
*
+ * LOCKING:
+ * Takes mode config lock.
+ *
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
*/
@@ -1794,7 +1809,7 @@ out:
* @arg: arg from ioctl
*
* LOCKING:
- * Caller? (FIXME)
+ * Takes mode config lock.
*
* Build a new CRTC configuration based on user request.
*
@@ -1809,7 +1824,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_mode_object *obj;
- struct drm_crtc *crtc, *crtcfb;
+ struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
struct drm_display_mode *mode = NULL;
@@ -1821,6 +1836,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ /* For some reason crtc x/y offsets are signed internally. */
+ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ return -ERANGE;
+
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1836,14 +1855,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
- list_for_each_entry(crtcfb,
- &dev->mode_config.crtc_list, head) {
- if (crtcfb == crtc) {
- DRM_DEBUG_KMS("Using current fb for "
- "setmode\n");
- fb = crtc->fb;
- }
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ ret = -EINVAL;
+ goto out;
}
+ fb = crtc->fb;
} else {
obj = drm_mode_object_find(dev, crtc_req->fb_id,
DRM_MODE_OBJECT_FB);
@@ -1857,8 +1874,30 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
mode = drm_mode_create(dev);
- drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+ if (mode->hdisplay > fb->width ||
+ mode->vdisplay > fb->height ||
+ crtc_req->x > fb->width - mode->hdisplay ||
+ crtc_req->y > fb->height - mode->vdisplay) {
+ DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
+ mode->hdisplay, mode->vdisplay,
+ crtc_req->x, crtc_req->y,
+ fb->width, fb->height);
+ ret = -ENOSPC;
+ goto out;
+ }
}
if (crtc_req->count_connectors == 0 && mode) {
@@ -1926,6 +1965,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
out:
kfree(connector_set);
+ drm_mode_destroy(dev, mode);
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
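As a worked example of the new viewport check: with a 1280x1024 framebuffer, a 1024x768 mode placed at x=300 is rejected with -ENOSPC because 300 > 1280 - 1024, while the same mode at x=256 still fits.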
@@ -2275,7 +2315,7 @@ out:
* @arg: arg from ioctl
*
* LOCKING:
- * Caller? (FIXME)
+ * Takes mode config lock.
*
* Lookup the FB given its ID and return info about it.
*
@@ -2424,38 +2464,48 @@ void drm_fb_release(struct drm_file *priv)
*
* Add @mode to @connector's user mode list.
*/
-static int drm_mode_attachmode(struct drm_device *dev,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
+static void drm_mode_attachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- int ret = 0;
-
list_add_tail(&mode->head, &connector->user_modes);
- return ret;
}
int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_connector *connector;
int ret = 0;
- struct drm_display_mode *dup_mode;
- int need_dup = 0;
+ struct drm_display_mode *dup_mode, *next;
+ LIST_HEAD(list);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
- break;
+ continue;
if (connector->encoder->crtc == crtc) {
- if (need_dup)
- dup_mode = drm_mode_duplicate(dev, mode);
- else
- dup_mode = mode;
- ret = drm_mode_attachmode(dev, connector, dup_mode);
- if (ret)
- return ret;
- need_dup = 1;
+ dup_mode = drm_mode_duplicate(dev, mode);
+ if (!dup_mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&dup_mode->head, &list);
}
}
- return 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->encoder->crtc == crtc)
+ list_move_tail(list.next, &connector->user_modes);
+ }
+
+ WARN_ON(!list_empty(&list));
+
+ out:
+ list_for_each_entry_safe(dup_mode, next, &list, head)
+ drm_mode_destroy(dev, dup_mode);
+
+ return ret;
}
EXPORT_SYMBOL(drm_mode_attachmode_crtc);
@@ -2534,9 +2584,14 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
goto out;
}
- drm_crtc_convert_umode(mode, umode);
+ ret = drm_crtc_convert_umode(mode, umode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ drm_mode_destroy(dev, mode);
+ goto out;
+ }
- ret = drm_mode_attachmode(dev, connector, mode);
+ drm_mode_attachmode(dev, connector, mode);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -2577,7 +2632,12 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
}
connector = obj_to_connector(obj);
- drm_crtc_convert_umode(&mode, umode);
+ ret = drm_crtc_convert_umode(&mode, umode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
ret = drm_mode_detachmode(dev, connector, &mode);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2588,6 +2648,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values)
{
struct drm_property *property = NULL;
+ int ret;
property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
if (!property)
@@ -2599,7 +2660,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
goto fail;
}
- drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
+
property->flags = flags;
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_blob_list);
@@ -2612,11 +2676,59 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
fail:
+ kfree(property->values);
kfree(property);
return NULL;
}
EXPORT_SYMBOL(drm_property_create);
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_ENUM;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
+
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_RANGE;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_range);
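A minimal usage sketch for the two new helpers (illustrative only; the property names, enum values and the scaling_modes table are made up for this example, not taken from the patch):

	static const struct drm_prop_enum_list scaling_modes[] = {
		{ 0, "None" },
		{ 1, "Full" },
	};
	struct drm_property *prop, *range;

	prop = drm_property_create_enum(dev, 0, "scaling mode",
					scaling_modes, ARRAY_SIZE(scaling_modes));
	range = drm_property_create_range(dev, 0, "brightness", 0, 255);

Both helpers return NULL on failure, so a driver needs a single error check instead of the previous open-coded create-then-add_enum sequence.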
+
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
@@ -2828,6 +2940,7 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
void *data)
{
struct drm_property_blob *blob;
+ int ret;
if (!length || !data)
return NULL;
@@ -2836,13 +2949,16 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
if (!blob)
return NULL;
- blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
+ ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ if (ret) {
+ kfree(blob);
+ return NULL;
+ }
+
blob->length = length;
memcpy(blob->data, data, length);
- drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
-
list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
return blob;
}
@@ -3021,7 +3137,7 @@ void drm_mode_connector_detach_encoder(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
-bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
crtc->gamma_size = gamma_size;
@@ -3029,10 +3145,10 @@ bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
- return false;
+ return -ENOMEM;
}
- return true;
+ return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
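Since the helper now returns an errno instead of a bool, a caller would propagate it roughly like this (sketch; the 256-entry gamma LUT size is just the common choice, not something this patch mandates):

	ret = drm_mode_crtc_set_gamma_size(crtc, 256);
	if (ret)
		return ret;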
@@ -3178,6 +3294,18 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
fb = obj_to_fb(obj);
+ if (crtc->mode.hdisplay > fb->width ||
+ crtc->mode.vdisplay > fb->height ||
+ crtc->x > fb->width - crtc->mode.hdisplay ||
+ crtc->y > fb->height - crtc->mode.vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
+ fb->width, fb->height,
+ crtc->mode.hdisplay, crtc->mode.vdisplay,
+ crtc->x, crtc->y);
+ ret = -ENOSPC;
+ goto out;
+ }
+
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
spin_lock_irqsave(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 84a4a80..8111889 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -37,6 +37,7 @@
#include "drm_fourcc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+#include "drm_edid.h"
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -44,12 +45,12 @@ module_param_named(poll, drm_kms_helper_poll, bool, 0600);
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
- struct drm_display_mode *mode, *t;
+ struct drm_display_mode *mode;
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
return;
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
@@ -87,7 +88,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
+ struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
@@ -96,7 +97,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
- list_for_each_entry_safe(mode, t, &connector->modes, head)
+ list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
if (connector->force) {
@@ -118,7 +119,12 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
goto prune;
}
- count = (*connector_funcs->get_modes)(connector);
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+ count = drm_load_edid_firmware(connector);
+ if (count == 0)
+#endif
+ count = (*connector_funcs->get_modes)(connector);
+
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (count == 0)
@@ -136,7 +142,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_validate_flag(connector, mode_flags);
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = connector_funcs->mode_valid(connector,
mode);
@@ -152,7 +158,7 @@ prune:
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
- list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ list_for_each_entry(mode, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
@@ -352,6 +358,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
return true;
adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return false;
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ebf7d3f..0b65fbc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -135,23 +135,23 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -390,6 +390,10 @@ long drm_ioctl(struct file *filp,
unsigned int usize, asize;
dev = file_priv->minor->dev;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ece03fc..5a18b0d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -149,8 +149,7 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
-static bool
-drm_edid_block_valid(u8 *raw_edid)
+bool drm_edid_block_valid(u8 *raw_edid)
{
int i;
u8 csum = 0;
@@ -203,6 +202,7 @@ bad:
}
return 0;
}
+EXPORT_SYMBOL(drm_edid_block_valid);
/**
* drm_edid_is_valid - sanity check EDID data
@@ -226,7 +226,6 @@ bool drm_edid_is_valid(struct edid *edid)
}
EXPORT_SYMBOL(drm_edid_is_valid);
-#define DDC_ADDR 0x50
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
@@ -266,6 +265,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
+ if (ret == -ENXIO) {
+ DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
+ adapter->name);
+ break;
+ }
} while (ret != 2 && --retries);
return ret == 2 ? 0 : -1;
@@ -745,7 +749,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
- kfree(mode);
+ drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
new file mode 100644
index 0000000..da9acba
--- /dev/null
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -0,0 +1,250 @@
+/*
+ drm_edid_load.c: use a built-in EDID data set or load it via the firmware
+ interface
+
+ Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+
+static char edid_firmware[PATH_MAX];
+module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
+MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
+ "from built-in data or /lib/firmware instead. ");
+
+#define GENERIC_EDIDS 4
+static char *generic_edid_name[GENERIC_EDIDS] = {
+ "edid/1024x768.bin",
+ "edid/1280x1024.bin",
+ "edid/1680x1050.bin",
+ "edid/1920x1080.bin",
+};
+
+static u8 generic_edid[GENERIC_EDIDS][128] = {
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
+ 0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
+ 0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
+ 0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
+ 0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
+ 0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
+ 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
+ 0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
+ 0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
+ 0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
+ },
+ {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
+ 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+ 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+ 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+ 0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
+ 0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
+ },
+};
+
+static int edid_load(struct drm_connector *connector, char *name,
+ char *connector_name)
+{
+ const struct firmware *fw;
+ struct platform_device *pdev;
+ u8 *fwdata = NULL, *edid;
+ int fwsize, expected;
+ int builtin = 0, err = 0;
+ int i, valid_extensions = 0;
+
+ pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ DRM_ERROR("Failed to register EDID firmware platform device "
+ "for connector \"%s\"\n", connector_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = request_firmware(&fw, name, &pdev->dev);
+ platform_device_unregister(pdev);
+
+ if (err) {
+ i = 0;
+ while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
+ i++;
+ if (i < GENERIC_EDIDS) {
+ err = 0;
+ builtin = 1;
+ fwdata = generic_edid[i];
+ fwsize = sizeof(generic_edid[i]);
+ }
+ }
+
+ if (err) {
+ DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+ name, err);
+ goto out;
+ }
+
+ if (fwdata == NULL) {
+ fwdata = (u8 *) fw->data;
+ fwsize = fw->size;
+ }
+
+ expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
+ if (expected != fwsize) {
+ DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
+ "(expected %d, got %d)\n", name, expected, (int) fwsize);
+ err = -EINVAL;
+ goto relfw_out;
+ }
+
+ edid = kmalloc(fwsize, GFP_KERNEL);
+ if (edid == NULL) {
+ err = -ENOMEM;
+ goto relfw_out;
+ }
+ memcpy(edid, fwdata, fwsize);
+
+ if (!drm_edid_block_valid(edid)) {
+ DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
+ name);
+ kfree(edid);
+ err = -EINVAL;
+ goto relfw_out;
+ }
+
+ for (i = 1; i <= edid[0x7e]; i++) {
+ if (i != valid_extensions + 1)
+ memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
+ edid + i * EDID_LENGTH, EDID_LENGTH);
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH))
+ valid_extensions++;
+ }
+
+ if (valid_extensions != edid[0x7e]) {
+ edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+ DRM_INFO("Found %d valid extensions instead of %d in EDID data "
+ "\"%s\" for connector \"%s\"\n", valid_extensions,
+ edid[0x7e], name, connector_name);
+ edid[0x7e] = valid_extensions;
+ edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+ GFP_KERNEL);
+ if (edid == NULL) {
+ err = -ENOMEM;
+ goto relfw_out;
+ }
+ }
+
+ connector->display_info.raw_edid = edid;
+ DRM_INFO("Got %s EDID base block and %d extension%s from "
+ "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
+ "external", valid_extensions, valid_extensions == 1 ? "" : "s",
+ name, connector_name);
+
+relfw_out:
+ release_firmware(fw);
+
+out:
+ return err;
+}
+
+int drm_load_edid_firmware(struct drm_connector *connector)
+{
+ char *connector_name = drm_get_connector_name(connector);
+ char *edidname = edid_firmware, *last, *colon;
+ int ret = 0;
+
+ if (*edidname == '\0')
+ return ret;
+
+ colon = strchr(edidname, ':');
+ if (colon != NULL) {
+ if (strncmp(connector_name, edidname, colon - edidname))
+ return ret;
+ edidname = colon + 1;
+ if (*edidname == '\0')
+ return ret;
+ }
+
+ last = edidname + strlen(edidname) - 1;
+ if (*last == '\n')
+ *last = '\0';
+
+ ret = edid_load(connector, edidname, connector_name);
+ if (ret)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector,
+ (struct edid *) connector->display_info.raw_edid);
+
+ return drm_add_edid_modes(connector, (struct edid *)
+ connector->display_info.raw_edid);
+}
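The exact parameter prefix depends on which module drm_edid_load.c ends up linked into; assuming the usual drm_kms_helper placement, the override can be selected on the kernel command line or at modprobe time (the connector name below is only an example):

	drm_kms_helper.edid_firmware=edid/1280x1024.bin
	drm_kms_helper.edid_firmware=VGA-1:edid/1920x1080.bin

The first form applies the blob to every connector; the second restricts it to the named connector, matching the colon parsing in drm_load_edid_firmware() above.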
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index aada26f..7740dd2 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -306,91 +306,31 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
-static void drm_fb_helper_on(struct fb_info *info)
+static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- int i, j;
-
- /*
- * For each CRTC in this fb, turn the crtc on then,
- * find all associated encoders and turn them on.
- */
- mutex_lock(&dev->mode_config.mutex);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
-
- if (!crtc->enabled)
- continue;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- /* Walk the connectors & encoders on this fb turning them on */
- for (j = 0; j < fb_helper->connector_count; j++) {
- connector = fb_helper->connector_info[j]->connector;
- connector->dpms = DRM_MODE_DPMS_ON;
- drm_connector_property_set_value(connector,
- dev->mode_config.dpms_property,
- DRM_MODE_DPMS_ON);
- }
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
- }
- }
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
- struct drm_encoder *encoder;
int i, j;
/*
- * For each CRTC in this fb, find all associated encoders
- * and turn them off, then turn off the CRTC.
+ * For each CRTC in this fb, turn the connectors on/off.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
if (!crtc->enabled)
continue;
- /* Walk the connectors on this fb and mark them off */
+ /* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
- connector->dpms = dpms_mode;
+ drm_helper_connector_dpms(connector, dpms_mode);
drm_connector_property_set_value(connector,
- dev->mode_config.dpms_property,
- dpms_mode);
- }
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
-
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, dpms_mode);
- }
+ dev->mode_config.dpms_property, dpms_mode);
}
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
mutex_unlock(&dev->mode_config.mutex);
}
@@ -400,23 +340,23 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
- drm_fb_helper_on(info);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
- drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
break;
/* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
- drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
+ drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
break;
}
return 0;
@@ -430,8 +370,11 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
for (i = 0; i < helper->connector_count; i++)
kfree(helper->connector_info[i]);
kfree(helper->connector_info);
- for (i = 0; i < helper->crtc_count; i++)
+ for (i = 0; i < helper->crtc_count; i++) {
kfree(helper->crtc_info[i].mode_set.connectors);
+ if (helper->crtc_info[i].mode_set.mode)
+ drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ }
kfree(helper->crtc_info);
}
@@ -474,11 +417,10 @@ int drm_fb_helper_init(struct drm_device *dev,
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- fb_helper->crtc_info[i].crtc_id = crtc->base.id;
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- fb_helper->conn_limit = max_conn_count;
+
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 6263b01..7348a3d 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -133,6 +133,9 @@ int drm_open(struct inode *inode, struct file *filp)
if (!(dev = minor->dev))
return -ENODEV;
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
retcode = drm_open_helper(inode, filp, dev);
if (!retcode) {
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
@@ -181,6 +184,9 @@ int drm_stub_open(struct inode *inode, struct file *filp)
if (!(dev = minor->dev))
goto out;
+ if (drm_device_is_unplugged(dev))
+ goto out;
+
old_fops = filp->f_op;
filp->f_op = fops_get(dev->driver->fops);
if (filp->f_op == NULL) {
@@ -579,6 +585,8 @@ int drm_release(struct inode *inode, struct file *filp)
retcode = -EBUSY;
} else
retcode = drm_lastclose(dev);
+ if (drm_device_is_unplugged(dev))
+ drm_put_dev(dev);
}
mutex_unlock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f8625e2..0ef358e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -661,6 +661,9 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_hash_item *hash;
int ret = 0;
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
@@ -700,7 +703,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
*/
drm_gem_object_reference(obj);
- vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
out_unlock:
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 956fd38..cf85155 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -37,6 +37,7 @@
#include "drm_core.h"
#include "linux/pci.h"
+#include "linux/export.h"
/**
* Get the bus id.
@@ -276,6 +277,12 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_VBLANK_HIGH_CRTC:
req->value = 1;
break;
+ case DRM_CAP_DUMB_PREFERRED_DEPTH:
+ req->value = dev->mode_config.preferred_depth;
+ break;
+ case DRM_CAP_DUMB_PREFER_SHADOW:
+ req->value = dev->mode_config.prefer_shadow;
+ break;
default:
return -EINVAL;
}
@@ -346,3 +353,4 @@ int drm_noop(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
return 0;
}
+EXPORT_SYMBOL(drm_noop);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 44a5d0a..c869436 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -305,7 +305,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
* \param dev DRM device.
*
* Initializes the IRQ related data. Installs the handler, calling the driver
- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * \c irq_preinstall() and \c irq_postinstall() functions
* before and after the installation.
*/
int drm_irq_install(struct drm_device *dev)
@@ -385,7 +385,7 @@ EXPORT_SYMBOL(drm_irq_install);
*
* \param dev DRM device.
*
- * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ * Calls the driver's \c irq_uninstall() function, and stops the irq.
*/
int drm_irq_uninstall(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index c8b6b66..c86a0f1 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -37,25 +37,6 @@
#include <linux/export.h>
#include "drmP.h"
-/**
- * Called when "/proc/dri/%dev%/mem" is read.
- *
- * \param buf output buffer.
- * \param start start of output data.
- * \param offset requested start offset.
- * \param len requested number of bytes.
- * \param eof whether there is no more data to return.
- * \param data private data.
- * \return number of written bytes.
- *
- * No-op.
- */
-int drm_mem_info(char *buf, char **start, off_t offset,
- int len, int *eof, void *data)
-{
- return 0;
-}
-
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fb8e46b..b7adb4a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -686,8 +686,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
-
- p->crtc_vtotal |= 1;
}
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
@@ -716,6 +714,27 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
/**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * LOCKING:
+ * None.
+ *
+ * Copy an existing mode into another mode, preserving the object id
+ * of the destination mode.
+ */
+void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+ int id = dst->base.id;
+
+ *dst = *src;
+ dst->base.id = id;
+ INIT_LIST_HEAD(&dst->head);
+}
+EXPORT_SYMBOL(drm_mode_copy);
+
+/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @m: mode to duplicate
*
@@ -729,16 +748,13 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
- int new_id;
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
- new_id = nmode->base.id;
- *nmode = *mode;
- nmode->base.id = new_id;
- INIT_LIST_HEAD(&nmode->head);
+ drm_mode_copy(nmode, mode);
+
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
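A sketch of the intended use of drm_mode_copy() (the crtc/adjusted_mode names mirror the helper set-mode path and are not code added by this hunk):

	/*
	 * crtc->mode is a registered mode object; a plain struct assignment
	 * would clobber its base.id, whereas drm_mode_copy() preserves the id
	 * and reinitializes the list head instead of inheriting the source's.
	 */
	drm_mode_copy(&crtc->mode, adjusted_mode);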
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index d4d10b7..13f3d93 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -324,8 +324,6 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (ret)
goto err_g1;
- pci_set_master(pdev);
-
dev->pdev = pdev;
dev->dev = &pdev->dev;
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index ae9db5e..82431dc 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -122,7 +122,7 @@ static const char *drm_platform_get_name(struct drm_device *dev)
static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
- int len, ret;
+ int len, ret, id;
master->unique_len = 13 + strlen(dev->platformdev->name);
master->unique_size = master->unique_len;
@@ -131,8 +131,16 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
if (master->unique == NULL)
return -ENOMEM;
+ id = dev->platformdev->id;
+
+	/* if there is only a single instance of the platform device, id will
+	 * be set to -1; use 0 instead to avoid an odd-looking bus-id:
+ */
+ if (id == -1)
+ id = 0;
+
len = snprintf(master->unique, master->unique_len,
- "platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
+ "platform:%s:%02d", dev->platformdev->name, id);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 6d7b083..aa454f8 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -319,6 +319,7 @@ int drm_fill_in_dev(struct drm_device *dev,
drm_lastclose(dev);
return retcode;
}
+EXPORT_SYMBOL(drm_fill_in_dev);
/**
@@ -397,6 +398,7 @@ err_idr:
*minor = NULL;
return ret;
}
+EXPORT_SYMBOL(drm_get_minor);
/**
* Put a secondary minor number.
@@ -428,6 +430,12 @@ int drm_put_minor(struct drm_minor **minor_p)
*minor_p = NULL;
return 0;
}
+EXPORT_SYMBOL(drm_put_minor);
+
+static void drm_unplug_minor(struct drm_minor *minor)
+{
+ drm_sysfs_device_remove(minor);
+}
/**
* Called via drm_exit() at module unload time or when pci device is
@@ -492,3 +500,21 @@ void drm_put_dev(struct drm_device *dev)
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
+
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_unplug_minor(dev->control);
+ drm_unplug_minor(dev->primary);
+
+ mutex_lock(&drm_global_mutex);
+
+ drm_device_set_unplugged(dev);
+
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
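A sketch of how a hot-unpluggable (typically USB) driver is expected to call the new entry point from its disconnect handler; the function below is illustrative and not part of this patch:

	static void example_usb_disconnect(struct usb_interface *interface)
	{
		struct drm_device *dev = usb_get_intfdata(interface);

		/*
		 * Mark the device unplugged; if it is still open, the final
		 * drm_put_dev() is deferred to drm_release() (see the
		 * drm_fops.c hunk above).
		 */
		drm_unplug_dev(dev);
	}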
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 62c3675..5a7bd51 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -454,6 +454,8 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
+ if (!connector->kdev.parent)
+ return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
drm_get_connector_name(connector));
@@ -461,6 +463,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
device_remove_file(&connector->kdev, &connector_attrs[i]);
sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
device_unregister(&connector->kdev);
+ connector->kdev.parent = NULL;
}
EXPORT_SYMBOL(drm_sysfs_connector_remove);
@@ -533,7 +536,9 @@ err_out:
*/
void drm_sysfs_device_remove(struct drm_minor *minor)
{
- device_unregister(&minor->kdev);
+ if (minor->kdev.parent)
+ device_unregister(&minor->kdev);
+ minor->kdev.parent = NULL;
}
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 445003f..c8c83da 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -2,7 +2,6 @@
#include <linux/usb.h>
#include <linux/export.h>
-#ifdef CONFIG_USB
int drm_get_usb_dev(struct usb_interface *interface,
const struct usb_device_id *id,
struct drm_driver *driver)
@@ -115,4 +114,3 @@ void drm_usb_exit(struct drm_driver *driver,
usb_deregister(udriver);
}
EXPORT_SYMBOL(drm_usb_exit);
-#endif
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 8c03eaf..1495618 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -519,7 +519,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
return 0;
}
@@ -671,7 +670,6 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
return 0;
}
@@ -682,6 +680,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
int ret;
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
ret = drm_mmap_locked(filp, vma);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index b9e5266c..3343ac4 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,7 +1,6 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on DRM && PLAT_SAMSUNG
- default n
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -12,16 +11,19 @@ config DRM_EXYNOS
If M is selected the module will be called exynosdrm.
config DRM_EXYNOS_FIMD
- tristate "Exynos DRM FIMD"
+ bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
- default n
help
Choose this option if you want to use Exynos FIMD for DRM.
- If M is selected, the module will be called exynos_drm_fimd
config DRM_EXYNOS_HDMI
- tristate "Exynos DRM HDMI"
+ bool "Exynos DRM HDMI"
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
help
Choose this option if you want to use Exynos HDMI for DRM.
- If M is selected, the module will be called exynos_drm_hdmi
+
+config DRM_EXYNOS_VIDI
+ bool "Exynos DRM Virtual Display"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos VIDI for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 395e69c..9e0bff8 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,7 +8,10 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
-obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
-obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
-obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
- exynos_hdmiphy.o exynos_drm_hdmi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
+ exynos_ddc.o exynos_hdmiphy.o \
+ exynos_drm_hdmi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
+
+obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 84b614f..7e1051d 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -55,4 +55,3 @@ struct i2c_driver ddc_driver = {
.remove = __devexit_p(s5p_ddc_remove),
.command = NULL,
};
-EXPORT_SYMBOL(ddc_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3cf785c..4a3a5f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -25,45 +25,161 @@
#include "drmP.h"
#include "drm.h"
+#include "exynos_drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer)
+ unsigned int flags, struct exynos_drm_gem_buf *buf)
{
+ dma_addr_t start_addr, end_addr;
+ unsigned int npages, page_size, i = 0;
+ struct scatterlist *sgl;
+ int ret = 0;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
- &buffer->dma_addr, GFP_KERNEL);
- if (!buffer->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
+ if (flags & EXYNOS_BO_NONCONTIG) {
+ DRM_DEBUG_KMS("not support allocation type.\n");
+ return -EINVAL;
+ }
+
+ if (buf->dma_addr) {
+ DRM_DEBUG_KMS("already allocated.\n");
+ return 0;
+ }
+
+ if (buf->size >= SZ_1M) {
+ npages = (buf->size >> SECTION_SHIFT) + 1;
+ page_size = SECTION_SIZE;
+ } else if (buf->size >= SZ_64K) {
+ npages = (buf->size >> 16) + 1;
+ page_size = SZ_64K;
+ } else {
+ npages = (buf->size >> PAGE_SHIFT) + 1;
+ page_size = PAGE_SIZE;
+ }
+
+ buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr,
- buffer->size);
+ ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize sg table.\n");
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+ return -ENOMEM;
+ }
- return 0;
+ buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL);
+ if (!buf->kvaddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ start_addr = buf->dma_addr;
+ end_addr = buf->dma_addr + buf->size;
+
+ buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ sgl = buf->sgt->sgl;
+
+ while (i < npages) {
+ buf->pages[i] = phys_to_page(start_addr);
+ sg_set_page(sgl, buf->pages[i], page_size, 0);
+ sg_dma_address(sgl) = start_addr;
+ start_addr += page_size;
+ if (end_addr - start_addr < page_size)
+ break;
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ buf->pages[i] = phys_to_page(start_addr);
+
+ sgl = sg_next(sgl);
+ sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0);
+
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buf->kvaddr,
+ (unsigned long)buf->dma_addr,
+ buf->size);
+
+ return ret;
+err2:
+ dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr);
+ buf->dma_addr = (dma_addr_t)NULL;
+err1:
+ sg_free_table(buf->sgt);
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+
+ return ret;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer)
+ unsigned int flags, struct exynos_drm_gem_buf *buf)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (buffer->dma_addr && buffer->size)
- dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
- (dma_addr_t)buffer->dma_addr);
- else
- DRM_DEBUG_KMS("buffer data are invalid.\n");
+ /*
+	 * release only physically contiguous memory here; non-contiguous
+	 * memory is released by the exynos gem framework.
+ */
+ if (flags & EXYNOS_BO_NONCONTIG) {
+ DRM_DEBUG_KMS("not support allocation type.\n");
+ return;
+ }
+
+ if (!buf->dma_addr) {
+ DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buf->kvaddr,
+ (unsigned long)buf->dma_addr,
+ buf->size);
+
+ sg_free_table(buf->sgt);
+
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+
+ kfree(buf->pages);
+ buf->pages = NULL;
+
+ dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr);
+ buf->dma_addr = (dma_addr_t)NULL;
}
-struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
- unsigned int size)
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+ unsigned int size)
{
struct exynos_drm_gem_buf *buffer;
@@ -77,21 +193,11 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
}
buffer->size = size;
-
- /*
- * allocate memory region with size and set the memory information
- * to vaddr and dma_addr of a buffer object.
- */
- if (lowlevel_buffer_allocate(dev, buffer) < 0) {
- kfree(buffer);
- return NULL;
- }
-
return buffer;
}
-void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer)
+void exynos_drm_fini_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
@@ -100,12 +206,27 @@ void exynos_drm_buf_destroy(struct drm_device *dev,
return;
}
- lowlevel_buffer_deallocate(dev, buffer);
-
kfree(buffer);
buffer = NULL;
}
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
-MODULE_LICENSE("GPL");
+int exynos_drm_alloc_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buf, unsigned int flags)
+{
+
+ /*
+ * allocate memory region and set the memory information
+ * to vaddr and dma_addr of a buffer object.
+ */
+ if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void exynos_drm_free_buf(struct drm_device *dev,
+ unsigned int flags, struct exynos_drm_gem_buf *buffer)
+{
+
+ lowlevel_buffer_deallocate(dev, flags, buffer);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index c913f2b..3388e4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,12 +26,22 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/* allocate physical memory. */
-struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
- unsigned int size);
+/* create and initialize buffer object. */
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+ unsigned int size);
-/* remove allocated physical memory. */
-void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_gem_buf *buffer);
+/* destroy buffer object. */
+void exynos_drm_fini_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buffer);
+
+/* allocate physical memory region and setup sgt and pages. */
+int exynos_drm_alloc_buf(struct drm_device *dev,
+ struct exynos_drm_gem_buf *buf,
+ unsigned int flags);
+
+/* release physical memory region, sgt and pages. */
+void exynos_drm_free_buf(struct drm_device *dev,
+ unsigned int flags,
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 99d5527..bf791fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -225,6 +225,29 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
.best_encoder = exynos_drm_best_encoder,
};
+static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_manager_ops *ops = manager->ops;
+ unsigned int width, height;
+
+ width = max_width;
+ height = max_height;
+
+ /*
+	 * if a specific driver wants to find the desired_mode using the
+	 * maximum resolution, get the max width and height from that driver.
+ */
+ if (ops && ops->get_max_resol)
+ ops->get_max_resol(manager->dev, &width, &height);
+
+ return drm_helper_probe_single_connector_modes(connector, width,
+ height);
+}
+
/* get detection status of display device. */
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
@@ -262,7 +285,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
static struct drm_connector_funcs exynos_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ .fill_modes = exynos_drm_connector_fill_modes,
.detect = exynos_drm_connector_detect,
.destroy = exynos_drm_connector_destroy,
};
@@ -292,6 +315,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector->interlace_allowed = true;
connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case EXYNOS_DISPLAY_TYPE_VIDI:
+ type = DRM_MODE_CONNECTOR_VIRTUAL;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
break;
@@ -325,9 +352,3 @@ err_connector:
kfree(exynos_connector);
return NULL;
}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM Connector Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index d08a558..411832e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -32,7 +32,6 @@
#include "exynos_drm_connector.h"
#include "exynos_drm_fbdev.h"
-static DEFINE_MUTEX(exynos_drm_mutex);
static LIST_HEAD(exynos_drm_subdrv_list);
static struct drm_device *drm_dev;
@@ -60,6 +59,9 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev,
return ret;
}
+ if (subdrv->is_local)
+ return 0;
+
/* create and initialize a encoder for this sub driver. */
encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
(1 << MAX_CRTC) - 1);
@@ -116,13 +118,10 @@ int exynos_drm_device_register(struct drm_device *dev)
if (!dev)
return -EINVAL;
- if (drm_dev) {
- DRM_ERROR("Already drm device were registered\n");
- return -EBUSY;
- }
+ drm_dev = dev;
- mutex_lock(&exynos_drm_mutex);
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
+ subdrv->drm_dev = dev;
err = exynos_drm_subdrv_probe(dev, subdrv);
if (err) {
DRM_DEBUG("exynos drm subdrv probe failed.\n");
@@ -130,9 +129,6 @@ int exynos_drm_device_register(struct drm_device *dev)
}
}
- drm_dev = dev;
- mutex_unlock(&exynos_drm_mutex);
-
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_register);
@@ -143,86 +139,28 @@ int exynos_drm_device_unregister(struct drm_device *dev)
DRM_DEBUG_DRIVER("%s\n", __FILE__);
- if (!dev || dev != drm_dev) {
+ if (!dev) {
WARN(1, "Unexpected drm device unregister!\n");
return -EINVAL;
}
- mutex_lock(&exynos_drm_mutex);
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
exynos_drm_subdrv_remove(dev, subdrv);
drm_dev = NULL;
- mutex_unlock(&exynos_drm_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
-static int exynos_drm_mode_group_reinit(struct drm_device *dev)
-{
- struct drm_mode_group *group = &dev->primary->mode_group;
- uint32_t *id_list = group->id_list;
- int ret;
-
- DRM_DEBUG_DRIVER("%s\n", __FILE__);
-
- ret = drm_mode_group_init_legacy_group(dev, group);
- if (ret < 0)
- return ret;
-
- kfree(id_list);
- return 0;
-}
-
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
{
- int err;
-
DRM_DEBUG_DRIVER("%s\n", __FILE__);
if (!subdrv)
return -EINVAL;
- mutex_lock(&exynos_drm_mutex);
- if (drm_dev) {
- err = exynos_drm_subdrv_probe(drm_dev, subdrv);
- if (err) {
- DRM_ERROR("failed to probe exynos drm subdrv\n");
- mutex_unlock(&exynos_drm_mutex);
- return err;
- }
-
- /* setup possible_clones. */
- exynos_drm_encoder_setup(drm_dev);
-
- /*
- * if any specific driver such as fimd or hdmi driver called
- * exynos_drm_subdrv_register() later than drm_load(),
- * the fb helper should be re-initialized and re-configured.
- */
- err = exynos_drm_fbdev_reinit(drm_dev);
- if (err) {
- DRM_ERROR("failed to reinitialize exynos drm fbdev\n");
- exynos_drm_subdrv_remove(drm_dev, subdrv);
- mutex_unlock(&exynos_drm_mutex);
- return err;
- }
-
- err = exynos_drm_mode_group_reinit(drm_dev);
- if (err) {
- DRM_ERROR("failed to reinitialize mode group\n");
- exynos_drm_fbdev_fini(drm_dev);
- exynos_drm_subdrv_remove(drm_dev, subdrv);
- mutex_unlock(&exynos_drm_mutex);
- return err;
- }
- }
-
- subdrv->drm_dev = drm_dev;
-
list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
- mutex_unlock(&exynos_drm_mutex);
return 0;
}
@@ -230,46 +168,48 @@ EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
{
- int ret = -EFAULT;
-
DRM_DEBUG_DRIVER("%s\n", __FILE__);
- if (!subdrv) {
- DRM_DEBUG("Unexpected exynos drm subdrv unregister!\n");
- return ret;
- }
+ if (!subdrv)
+ return -EINVAL;
- mutex_lock(&exynos_drm_mutex);
- if (drm_dev) {
- exynos_drm_subdrv_remove(drm_dev, subdrv);
- list_del(&subdrv->list);
+ list_del(&subdrv->list);
- /*
- * fb helper should be updated once a sub driver is released
- * to re-configure crtc and connector and also to re-setup
- * drm framebuffer.
- */
- ret = exynos_drm_fbdev_reinit(drm_dev);
- if (ret < 0) {
- DRM_ERROR("failed fb helper reinit.\n");
- goto fail;
- }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
- ret = exynos_drm_mode_group_reinit(drm_dev);
- if (ret < 0) {
- DRM_ERROR("failed drm mode group reinit.\n");
- goto fail;
+int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+ if (subdrv->open) {
+ ret = subdrv->open(dev, subdrv->manager.dev, file);
+ if (ret)
+ goto err;
}
}
-fail:
- mutex_unlock(&exynos_drm_mutex);
+ return 0;
+
+err:
+ list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ if (subdrv->close)
+ subdrv->close(dev, subdrv->manager.dev, file);
+ }
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
+
+void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct exynos_drm_subdrv *subdrv;
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM Core Driver");
-MODULE_LICENSE("GPL");
+ list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+ if (subdrv->close)
+ subdrv->close(dev, subdrv->manager.dev, file);
+ }
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
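The two helpers above simply walk exynos_drm_subdrv_list and forward the drm_file to each sub driver's optional open/close hook; exynos_drm_open() and exynos_drm_preclose() in the exynos_drm_drv.c hunk below call them on device file open and close. A minimal sketch of how a sub driver might wire the new hooks, assuming the struct exynos_drm_subdrv layout from the exynos_drm_drv.h hunk further down; the my_subdrv names are hypothetical:

    /* sketch only -- hypothetical sub driver using the new per-file hooks */
    static int my_subdrv_open(struct drm_device *drm_dev, struct device *dev,
                              struct drm_file *file)
    {
            /* set up per-file state here; a non-zero return aborts the open
             * and the already-opened sub drivers get closed again. */
            return 0;
    }

    static void my_subdrv_close(struct drm_device *drm_dev, struct device *dev,
                                struct drm_file *file)
    {
            /* release whatever my_subdrv_open() allocated for this file */
    }

    static struct exynos_drm_subdrv my_subdrv = {
            .open   = my_subdrv_open,
            .close  = my_subdrv_close,
    };

    /* exynos_drm_subdrv_register(&my_subdrv) adds it to the list walked by
     * exynos_drm_subdrv_open() and exynos_drm_subdrv_close(). */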
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index de81883..3486ffe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -249,7 +249,11 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- mode = adjusted_mode;
+ /*
+ * copy the mode data adjusted by mode_fixup() into crtc->mode
+ * so that the hardware can be set to the proper mode.
+ */
+ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
return exynos_drm_crtc_update(crtc);
}
@@ -426,9 +430,3 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_disable_vblank);
}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM CRTC Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 09cc13f..a6819b5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -38,6 +38,7 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
+#include "exynos_drm_vidi.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -144,11 +145,34 @@ static int exynos_drm_unload(struct drm_device *dev)
return 0;
}
+static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ return exynos_drm_subdrv_open(dev, file);
+}
+
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ unsigned long flags;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ /* release events of current file */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry_safe(e, t, &private->pageflip_event_list,
+ base.link) {
+ if (e->base.file_priv == file) {
+ list_del(&e->base.link);
+ e->base.destroy(&e->base);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ exynos_drm_subdrv_close(dev, file);
}
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -185,6 +209,8 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
+ vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -202,6 +228,7 @@ static struct drm_driver exynos_drm_driver = {
DRIVER_MODESET | DRIVER_GEM,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
+ .open = exynos_drm_open,
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
@@ -252,9 +279,60 @@ static struct platform_driver exynos_drm_platform_driver = {
static int __init exynos_drm_init(void)
{
+ int ret;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
- return platform_driver_register(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ ret = platform_driver_register(&fimd_driver);
+ if (ret < 0)
+ goto out_fimd;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret < 0)
+ goto out_hdmi;
+ ret = platform_driver_register(&mixer_driver);
+ if (ret < 0)
+ goto out_mixer;
+ ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+ if (ret < 0)
+ goto out_common_hdmi;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ ret = platform_driver_register(&vidi_driver);
+ if (ret < 0)
+ goto out_vidi;
+#endif
+
+ ret = platform_driver_register(&exynos_drm_platform_driver);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+out_vidi:
+ platform_driver_unregister(&vidi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+ platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+out_common_hdmi:
+ platform_driver_unregister(&mixer_driver);
+out_mixer:
+ platform_driver_unregister(&hdmi_driver);
+out_hdmi:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ platform_driver_unregister(&fimd_driver);
+out_fimd:
+#endif
+ return ret;
}
static void __exit exynos_drm_exit(void)
@@ -262,6 +340,20 @@ static void __exit exynos_drm_exit(void)
DRM_DEBUG_DRIVER("%s\n", __FILE__);
platform_driver_unregister(&exynos_drm_platform_driver);
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+ platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+ platform_driver_unregister(&mixer_driver);
+ platform_driver_unregister(&hdmi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+ platform_driver_unregister(&vidi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+ platform_driver_unregister(&fimd_driver);
+#endif
}
module_init(exynos_drm_init);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 13540de..fbd0a23 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -32,9 +32,9 @@
#include <linux/module.h>
#include "drm.h"
-#define MAX_CRTC 2
+#define MAX_CRTC 3
#define MAX_PLANE 5
-#define MAX_FB_BUFFER 3
+#define MAX_FB_BUFFER 4
#define DEFAULT_ZPOS -1
struct drm_device;
@@ -50,6 +50,8 @@ enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_LCD,
/* HDMI Interface. */
EXYNOS_DISPLAY_TYPE_HDMI,
+ /* Virtual Display Interface. */
+ EXYNOS_DISPLAY_TYPE_VIDI,
};
/*
@@ -155,8 +157,10 @@ struct exynos_drm_display_ops {
*
* @dpms: control device power.
* @apply: set timing, vblank and overlay data to registers.
+ * @mode_fixup: fix up the mode data against the hw specific display mode.
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
+ * @get_max_resol: get the maximum resolution supported by the hardware.
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -164,7 +168,13 @@ struct exynos_drm_display_ops {
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
void (*apply)(struct device *subdrv_dev);
+ void (*mode_fixup)(struct device *subdrv_dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct device *subdrv_dev, void *mode);
+ void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
+ unsigned int *height);
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
@@ -217,10 +227,13 @@ struct exynos_drm_private {
* @list: sub driver has its own list object to register to exynos drm driver.
* @drm_dev: pointer to drm_device and this pointer would be set
* when sub driver calls exynos_drm_subdrv_register().
+ * @is_local: set when the encoder and connector appear for a device that
+ *	is not related to real display hardware.
* @probe: this callback would be called by exynos drm driver after
* subdrv is registered to it.
* @remove: this callback is used to release resources created
* by probe callback.
+ * @open: called when the drm device file is opened.
+ * @close: called when the drm device file is closed.
* @manager: subdrv has its own manager to control a hardware appropriately
* and we can access a hardware drawing on this manager.
* @encoder: encoder object owned by this sub driver.
@@ -229,9 +242,14 @@ struct exynos_drm_private {
struct exynos_drm_subdrv {
struct list_head list;
struct drm_device *drm_dev;
+ bool is_local;
int (*probe)(struct drm_device *drm_dev, struct device *dev);
void (*remove)(struct drm_device *dev);
+ int (*open)(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file);
+ void (*close)(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file);
struct exynos_drm_manager manager;
struct drm_encoder *encoder;
@@ -254,15 +272,19 @@ int exynos_drm_device_unregister(struct drm_device *dev);
* this function would be called by sub drivers such as display controller
* or hdmi driver to register this sub driver object to exynos drm driver
* and when a sub driver is registered to exynos drm driver a probe callback
- * of the sub driver is called and creates its own encoder and connector
- * and then fb helper and drm mode group would be re-initialized.
+ * of the sub driver is called and creates its own encoder and connector.
*/
int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
-/*
- * this function removes subdrv list from exynos drm driver and fb helper
- * and drm mode group would be re-initialized.
- */
+/* this function removes the subdrv from the exynos drm driver's subdrv list */
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
+void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+
+extern struct platform_driver fimd_driver;
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+extern struct platform_driver exynos_drm_common_hdmi_driver;
+extern struct platform_driver vidi_driver;
#endif
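exynos_drm_manager_ops grows two optional callbacks in this hunk: mode_fixup lets a manager adjust the requested mode before it is programmed (the encoder hunk below forwards it for the connector attached to the encoder), and get_max_resol reports the largest resolution the hardware supports (the common hdmi driver forwards both later in this patch). A purely illustrative sketch of a manager wiring the two callbacks; the my_* names and the 1920x1080 limit are assumptions, not taken from any real Exynos device:

    /* sketch only -- hypothetical manager implementing the new callbacks */
    static void my_get_max_resol(struct device *subdrv_dev,
                                 unsigned int *width, unsigned int *height)
    {
            *width = 1920;          /* assumed hardware limit */
            *height = 1080;
    }

    static void my_mode_fixup(struct device *subdrv_dev,
                              struct drm_connector *connector,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode)
    {
            /* illustrative only: clamp the adjusted mode to the assumed limit */
            if (adjusted_mode->hdisplay > 1920)
                    adjusted_mode->hdisplay = 1920;
            if (adjusted_mode->vdisplay > 1080)
                    adjusted_mode->vdisplay = 1080;
    }

    static struct exynos_drm_manager_ops my_manager_ops = {
            .mode_fixup     = my_mode_fixup,
            .get_max_resol  = my_get_max_resol,
            /* dpms, mode_set, commit and the vblank hooks stay as before */
    };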
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index ef4754f..6e9ac7b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -111,9 +111,19 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ if (manager_ops && manager_ops->mode_fixup)
+ manager_ops->mode_fixup(manager->dev, connector,
+ mode, adjusted_mode);
+ }
return true;
}
@@ -132,12 +142,11 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_KMS("%s\n", __FILE__);
- mode = adjusted_mode;
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
if (manager_ops && manager_ops->mode_set)
- manager_ops->mode_set(manager->dev, mode);
+ manager_ops->mode_set(manager->dev,
+ adjusted_mode);
if (overlay_ops && overlay_ops->mode_set)
overlay_ops->mode_set(manager->dev, overlay);
@@ -209,6 +218,7 @@ static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
switch (display_ops->type) {
case EXYNOS_DISPLAY_TYPE_LCD:
case EXYNOS_DISPLAY_TYPE_HDMI:
+ case EXYNOS_DISPLAY_TYPE_VIDI:
clone_mask |= (1 << (cnt++));
break;
default:
@@ -433,9 +443,3 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 3733fe6..c38c8f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -211,9 +211,3 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM FB Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 54f8f07..d5586cc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -125,7 +125,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
}
size = mode_cmd.pitches[0] * mode_cmd.height;
- exynos_gem_obj = exynos_drm_gem_create(dev, size);
+
+ /* 0 means to allocate physically contiguous memory */
+ exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto out;
@@ -314,89 +316,3 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
drm_fb_helper_restore_fbdev_mode(private->fb_helper);
}
-
-int exynos_drm_fbdev_reinit(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!private)
- return -EINVAL;
-
- /*
- * if all sub drivers were unloaded then num_connector is 0
- * so at this time, the framebuffers also should be destroyed.
- */
- if (!dev->mode_config.num_connector) {
- exynos_drm_fbdev_fini(dev);
- return 0;
- }
-
- fb_helper = private->fb_helper;
-
- if (fb_helper) {
- struct list_head temp_list;
-
- INIT_LIST_HEAD(&temp_list);
-
- /*
- * fb_helper is reintialized but kernel fb is reused
- * so kernel_fb_list need to be backuped and restored
- */
- if (!list_empty(&fb_helper->kernel_fb_list))
- list_replace_init(&fb_helper->kernel_fb_list,
- &temp_list);
-
- drm_fb_helper_fini(fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper,
- dev->mode_config.num_crtc, MAX_CONNECTOR);
- if (ret < 0) {
- DRM_ERROR("failed to initialize drm fb helper\n");
- return ret;
- }
-
- if (!list_empty(&temp_list))
- list_replace(&temp_list, &fb_helper->kernel_fb_list);
-
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret < 0) {
- DRM_ERROR("failed to add fb helper to connectors\n");
- goto err;
- }
-
- ret = drm_fb_helper_initial_config(fb_helper, PREFERRED_BPP);
- if (ret < 0) {
- DRM_ERROR("failed to set up hw configuration.\n");
- goto err;
- }
- } else {
- /*
- * if drm_load() failed whem drm load() was called prior
- * to specific drivers, fb_helper must be NULL and so
- * this fuction should be called again to re-initialize and
- * re-configure the fb helper. it means that this function
- * has been called by the specific drivers.
- */
- ret = exynos_drm_fbdev_init(dev);
- }
-
- return ret;
-
-err:
- /*
- * if drm_load() failed when drm load() was called prior
- * to specific drivers, the fb_helper must be NULL and so check it.
- */
- if (fb_helper)
- drm_fb_helper_fini(fb_helper);
-
- return ret;
-}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM FBDEV Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 56458ee..ecb6db2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1007,7 +1007,7 @@ static const struct dev_pm_ops fimd_pm_ops = {
SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
};
-static struct platform_driver fimd_driver = {
+struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = __devexit_p(fimd_remove),
.driver = {
@@ -1016,21 +1016,3 @@ static struct platform_driver fimd_driver = {
.pm = &fimd_pm_ops,
},
};
-
-static int __init fimd_init(void)
-{
- return platform_driver_register(&fimd_driver);
-}
-
-static void __exit fimd_exit(void)
-{
- platform_driver_unregister(&fimd_driver);
-}
-
-module_init(fimd_init);
-module_exit(fimd_exit);
-
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung DRM FIMD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 025abb3..fa1aa94 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "drm.h"
+#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -55,6 +56,178 @@ static unsigned int convert_to_vm_err_msg(int msg)
return out_msg;
}
+static unsigned int mask_gem_flags(unsigned int flags)
+{
+ return flags &= EXYNOS_BO_NONCONTIG;
+}
+
+static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+ }
+
+ return pages;
+
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_PTR(PTR_ERR(p));
+}
+
+static void exynos_gem_put_pages(struct drm_gem_object *obj,
+ struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+
+static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+ struct vm_area_struct *vma,
+ unsigned long f_vaddr,
+ pgoff_t page_offset)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ unsigned long pfn;
+
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ unsigned long usize = buf->size;
+
+ if (!buf->pages)
+ return -EINTR;
+
+ while (usize > 0) {
+ pfn = page_to_pfn(buf->pages[page_offset++]);
+ vm_insert_mixed(vma, f_vaddr, pfn);
+ f_vaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ }
+
+ return 0;
+ }
+
+ pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+
+ return vm_insert_mixed(vma, f_vaddr, pfn);
+}
+
+static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
+ struct page **pages;
+ unsigned int npages, i = 0;
+ int ret;
+
+ if (buf->pages) {
+ DRM_DEBUG_KMS("already allocated.\n");
+ return -EINVAL;
+ }
+
+ pages = exynos_gem_get_pages(obj, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ DRM_ERROR("failed to get pages.\n");
+ return PTR_ERR(pages);
+ }
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize sg table.\n");
+ ret = -EFAULT;
+ goto err1;
+ }
+
+ sgl = buf->sgt->sgl;
+
+ /* add all pages to the sg list. */
+ while (i < npages) {
+ sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
+ sg_dma_address(sgl) = page_to_phys(pages[i]);
+ i++;
+ sgl = sg_next(sgl);
+ }
+
+ /* TODO: add code for the UNCACHED type here. */
+
+ buf->pages = pages;
+ return ret;
+err1:
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+err:
+ exynos_gem_put_pages(obj, pages, true, false);
+ return ret;
+
+}
+
+static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+
+ /*
+ * if the buffer type is EXYNOS_BO_NONCONTIG then release all pages
+ * allocated by the gem fault handler.
+ */
+ sg_free_table(buf->sgt);
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+
+ exynos_gem_put_pages(obj, buf->pages, true, false);
+ buf->pages = NULL;
+
+ /* TODO: add code for the UNCACHED type here. */
+}
+
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
struct drm_file *file_priv,
unsigned int *handle)
@@ -90,7 +263,15 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+ if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
+ exynos_gem_obj->buffer->pages)
+ exynos_drm_gem_put_pages(obj);
+ else
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
+ exynos_gem_obj->buffer);
+
+ exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+ exynos_gem_obj->buffer = NULL;
if (obj->map_list.map)
drm_gem_free_mmap_offset(obj);
@@ -99,6 +280,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
+ exynos_gem_obj = NULL;
}
static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
@@ -114,6 +296,7 @@ static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
return NULL;
}
+ exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
ret = drm_gem_object_init(dev, obj, size);
@@ -129,27 +312,55 @@ static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
- unsigned long size)
+ unsigned int flags,
+ unsigned long size)
{
- struct exynos_drm_gem_buf *buffer;
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
+ int ret;
size = roundup(size, PAGE_SIZE);
DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
- buffer = exynos_drm_buf_create(dev, size);
- if (!buffer)
+ flags = mask_gem_flags(flags);
+
+ buf = exynos_drm_init_buf(dev, size);
+ if (!buf)
return ERR_PTR(-ENOMEM);
exynos_gem_obj = exynos_drm_gem_init(dev, size);
if (!exynos_gem_obj) {
- exynos_drm_buf_destroy(dev, buffer);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err;
}
- exynos_gem_obj->buffer = buffer;
+ exynos_gem_obj->buffer = buf;
+
+ /* set memory type and cache attribute from user side. */
+ exynos_gem_obj->flags = flags;
+
+ /*
+ * allocate all pages of the desired size if the user wants physically
+ * non-contiguous memory.
+ */
+ if (flags & EXYNOS_BO_NONCONTIG) {
+ ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err;
+ }
+ } else {
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err;
+ }
+ }
return exynos_gem_obj;
+err:
+ exynos_drm_fini_buf(dev, buf);
+ return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -161,7 +372,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -175,6 +386,64 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ DRM_DEBUG_KMS("not support NONCONTIG type.\n");
+ drm_gem_object_unreference_unlocked(obj);
+
+ /* TODO */
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &exynos_gem_obj->buffer->dma_addr;
+}
+
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ DRM_DEBUG_KMS("not support NONCONTIG type.\n");
+ drm_gem_object_unreference_unlocked(obj);
+
+ /* TODO */
+ return;
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ /*
+ * decrease obj->refcount one more time because we have already
+ * increased it at exynos_drm_gem_get_dma_addr().
+ */
+ drm_gem_object_unreference_unlocked(obj);
+}
+
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -200,7 +469,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size;
+ unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -208,9 +478,9 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
/* in case of direct mapping, always having non-cachable attribute */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_file = filp;
- vm_size = vma->vm_end - vma->vm_start;
+ vm_size = usize = vma->vm_end - vma->vm_start;
+
/*
* a buffer contains information to physically continuous memory
* allocated by user request or at framebuffer creation.
@@ -221,18 +491,37 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ int i = 0;
+
+ if (!buffer->pages)
+ return -EINVAL;
+
+ do {
+ ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
+ if (ret) {
+ DRM_ERROR("failed to remap user space.\n");
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+ } else {
+ /*
+ * get the page frame number of the physical memory to be mapped
+ * to user space.
+ */
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT;
+
+ DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+
+ if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+ vma->vm_page_prot)) {
+ DRM_ERROR("failed to remap pfn range.\n");
+ return -EAGAIN;
+ }
}
return 0;
@@ -312,9 +601,9 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
*/
args->pitch = args->width * args->bpp >> 3;
- args->size = args->pitch * args->height;
+ args->size = PAGE_ALIGN(args->pitch * args->height);
- exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -398,20 +687,31 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *dev = obj->dev;
- unsigned long pfn;
+ unsigned long f_vaddr;
pgoff_t page_offset;
int ret;
page_offset = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
+ f_vaddr = (unsigned long)vmf->virtual_address;
mutex_lock(&dev->struct_mutex);
- pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT) + page_offset;
+ /*
+ * allocate all pages of the desired size if the user wants physically
+ * non-contiguous memory.
+ */
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ ret = exynos_drm_gem_get_pages(obj);
+ if (ret < 0)
+ goto err;
+ }
- ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+ ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ if (ret < 0)
+ DRM_ERROR("failed to map pages.\n");
+err:
mutex_unlock(&dev->struct_mutex);
return convert_to_vm_err_msg(ret);
@@ -435,7 +735,3 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 67cdc91..e40fbad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -36,11 +36,15 @@
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
+ * @sgt: sg table used to transfer the page data.
+ * @pages: array of all pages in the allocated memory region.
* @size: size of allocated memory region.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
dma_addr_t dma_addr;
+ struct sg_table *sgt;
+ struct page **pages;
unsigned long size;
};
@@ -55,6 +59,8 @@ struct exynos_drm_gem_buf {
* by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
+ * @size: total size of the physically non-contiguous memory region.
+ * @flags: indicate the memory type and cache attribute of the allocated buffer.
*
* P.S. this object would be transfered to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
@@ -62,6 +68,8 @@ struct exynos_drm_gem_buf {
struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
+ unsigned long size;
+ unsigned int flags;
};
/* destroy a buffer with gem object */
@@ -69,7 +77,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
- unsigned long size);
+ unsigned int flags,
+ unsigned long size);
/*
* request gem object creation and buffer allocation as the size
@@ -79,6 +88,24 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/*
+ * get the dma address from a gem handle; this function can be used by
+ * other drivers such as 2d/3d acceleration drivers.
+ * calling this function increases the gem object reference count.
+ */
+void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv);
+
+/*
+ * put the dma address obtained from a gem handle; this function can be
+ * used by other drivers such as 2d/3d acceleration drivers.
+ * calling this function decreases the gem object reference count.
+ */
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv);
+
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
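exynos_drm_gem_get_dma_addr() looks up the gem object, keeps the reference taken by the lookup and returns a pointer to the buffer's dma_addr (non-contiguous objects are rejected for now); exynos_drm_gem_put_dma_addr() drops that reference again. A usage sketch from a hypothetical 2d/3d acceleration driver, where drm_dev, gem_handle and file_priv are assumed to be supplied by the caller:

    /* sketch: fetch and release a buffer's dma address (hypothetical caller) */
    dma_addr_t *dma_addr;

    dma_addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle, file_priv);
    if (IS_ERR(dma_addr))
            return PTR_ERR(dma_addr);

    /* ... program the accelerator with *dma_addr ... */

    /* drops the reference taken by exynos_drm_gem_get_dma_addr() */
    exynos_drm_gem_put_dma_addr(drm_dev, gem_handle, file_priv);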
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index ed8a319e..14eb26b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -38,7 +38,6 @@ struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *hdmi_ctx;
struct exynos_drm_hdmi_context *mixer_ctx;
- struct work_struct work;
};
void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
@@ -49,7 +48,6 @@ void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
if (display_ops)
hdmi_display_ops = display_ops;
}
-EXPORT_SYMBOL(exynos_drm_display_ops_register);
void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
*manager_ops)
@@ -59,7 +57,6 @@ void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
if (manager_ops)
hdmi_manager_ops = manager_ops;
}
-EXPORT_SYMBOL(exynos_drm_manager_ops_register);
void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
*overlay_ops)
@@ -69,7 +66,6 @@ void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
if (overlay_ops)
hdmi_overlay_ops = overlay_ops;
}
-EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
static bool drm_hdmi_is_connected(struct device *dev)
{
@@ -155,6 +151,20 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
+static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup)
+ hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector,
+ mode, adjusted_mode);
+}
+
static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
@@ -165,6 +175,18 @@ static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
}
+static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
+ unsigned int *width, unsigned int *height)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol)
+ hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width,
+ height);
+}
+
static void drm_hdmi_commit(struct device *subdrv_dev)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
@@ -200,7 +222,9 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.dpms = drm_hdmi_dpms,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
+ .mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
+ .get_max_resol = drm_hdmi_get_max_resol,
.commit = drm_hdmi_commit,
};
@@ -249,7 +273,6 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
struct drm_hdmi_context *ctx;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_drm_common_hdmi_pd *pd;
- int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -270,26 +293,13 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
return -EFAULT;
}
- ret = platform_driver_register(&hdmi_driver);
- if (ret) {
- DRM_DEBUG_KMS("failed to register hdmi driver.\n");
- return ret;
- }
-
- ret = platform_driver_register(&mixer_driver);
- if (ret) {
- DRM_DEBUG_KMS("failed to register mixer driver.\n");
- goto err_hdmidrv;
- }
-
ctx = get_ctx_from_subdrv(subdrv);
ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
to_context(pd->hdmi_dev);
if (!ctx->hdmi_ctx) {
DRM_DEBUG_KMS("hdmi context is null.\n");
- ret = -EFAULT;
- goto err_mixerdrv;
+ return -EFAULT;
}
ctx->hdmi_ctx->drm_dev = drm_dev;
@@ -298,42 +308,12 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
to_context(pd->mixer_dev);
if (!ctx->mixer_ctx) {
DRM_DEBUG_KMS("mixer context is null.\n");
- ret = -EFAULT;
- goto err_mixerdrv;
+ return -EFAULT;
}
ctx->mixer_ctx->drm_dev = drm_dev;
return 0;
-
-err_mixerdrv:
- platform_driver_unregister(&mixer_driver);
-err_hdmidrv:
- platform_driver_unregister(&hdmi_driver);
- return ret;
-}
-
-static void hdmi_subdrv_remove(struct drm_device *drm_dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- platform_driver_unregister(&hdmi_driver);
- platform_driver_unregister(&mixer_driver);
-}
-
-static void exynos_drm_hdmi_late_probe(struct work_struct *work)
-{
- struct drm_hdmi_context *ctx = container_of(work,
- struct drm_hdmi_context, work);
-
- /*
- * this function calls subdrv->probe() so this must be called
- * after probe context.
- *
- * PS. subdrv->probe() will call platform_driver_register() to probe
- * hdmi and mixer driver.
- */
- exynos_drm_subdrv_register(&ctx->subdrv);
}
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
@@ -353,7 +333,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv = &ctx->subdrv;
subdrv->probe = hdmi_subdrv_probe;
- subdrv->remove = hdmi_subdrv_remove;
subdrv->manager.pipe = -1;
subdrv->manager.ops = &drm_hdmi_manager_ops;
subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
@@ -362,9 +341,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, subdrv);
- INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
-
- schedule_work(&ctx->work);
+ exynos_drm_subdrv_register(subdrv);
return 0;
}
@@ -400,7 +377,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver exynos_drm_common_hdmi_driver = {
+struct platform_driver exynos_drm_common_hdmi_driver = {
.probe = exynos_drm_hdmi_probe,
.remove = __devexit_p(exynos_drm_hdmi_remove),
.driver = {
@@ -409,31 +386,3 @@ static struct platform_driver exynos_drm_common_hdmi_driver = {
.pm = &hdmi_pm_ops,
},
};
-
-static int __init exynos_drm_hdmi_init(void)
-{
- int ret;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
- if (ret) {
- DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
- return ret;
- }
-
- return ret;
-}
-
-static void __exit exynos_drm_hdmi_exit(void)
-{
- platform_driver_unregister(&exynos_drm_common_hdmi_driver);
-}
-
-module_init(exynos_drm_hdmi_init);
-module_exit(exynos_drm_hdmi_exit);
-
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 3c29f79..44497cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -47,7 +47,12 @@ struct exynos_hdmi_display_ops {
};
struct exynos_hdmi_manager_ops {
+ void (*mode_fixup)(void *ctx, struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void (*mode_set)(void *ctx, void *mode);
+ void (*get_max_resol)(void *ctx, unsigned int *width,
+ unsigned int *height);
void (*commit)(void *ctx);
void (*disable)(void *ctx);
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index bdcf770..c277a3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -22,6 +22,10 @@ struct exynos_plane {
bool enabled;
};
+static const uint32_t formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
static int
exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -115,9 +119,9 @@ int exynos_plane_init(struct drm_device *dev, unsigned int nr)
exynos_plane->overlay.zpos = DEFAULT_ZPOS;
- /* TODO: format */
return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
- &exynos_plane_funcs, NULL, 0, false);
+ &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
+ false);
}
int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
new file mode 100644
index 0000000..8e1339f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -0,0 +1,676 @@
+/* exynos_drm_vidi.c
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/exynos_drm.h>
+
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+
+/* vidi has a total of three virtual windows. */
+#define WINDOWS_NR 3
+
+#define get_vidi_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+struct vidi_win_data {
+ unsigned int offset_x;
+ unsigned int offset_y;
+ unsigned int ovl_width;
+ unsigned int ovl_height;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int bpp;
+ dma_addr_t dma_addr;
+ void __iomem *vaddr;
+ unsigned int buf_offsize;
+ unsigned int line_size; /* bytes */
+ bool enabled;
+};
+
+struct vidi_context {
+ struct exynos_drm_subdrv subdrv;
+ struct drm_crtc *crtc;
+ struct vidi_win_data win_data[WINDOWS_NR];
+ struct edid *raw_edid;
+ unsigned int clkdiv;
+ unsigned int default_win;
+ unsigned long irq_flags;
+ unsigned int connected;
+ bool vblank_on;
+ bool suspended;
+ struct work_struct work;
+ struct mutex lock;
+};
+
+static const char fake_edid_info[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
+ 0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd,
+ 0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00,
+ 0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e,
+ 0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00,
+ 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1,
+ 0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83,
+ 0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00,
+ 0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c,
+ 0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a,
+ 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00,
+ 0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x06
+};
+
+static void vidi_fake_vblank_handler(struct work_struct *work);
+
+static bool vidi_display_is_connected(struct device *dev)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * the connection request comes from the user side
+ * to trigger hotplug through a specific ioctl.
+ */
+ return ctx->connected ? true : false;
+}
+
+static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
+ u8 *edid, int len)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+ struct edid *raw_edid;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * the edid data comes from the user side and is set to
+ * ctx->raw_edid through a specific ioctl.
+ */
+ if (!ctx->raw_edid) {
+ DRM_DEBUG_KMS("raw_edid is null.\n");
+ return -EFAULT;
+ }
+
+ raw_edid = kzalloc(len, GFP_KERNEL);
+ if (!raw_edid) {
+ DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
+ * EDID_LENGTH, len));
+
+ /* attach the edid data to connector. */
+ connector->display_info.raw_edid = (char *)raw_edid;
+
+ memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
+ * EDID_LENGTH, len));
+
+ return 0;
+}
+
+static void *vidi_get_panel(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+
+ return NULL;
+}
+
+static int vidi_check_timing(struct device *dev, void *timing)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+
+ return 0;
+}
+
+static int vidi_display_power_on(struct device *dev, int mode)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO */
+
+ return 0;
+}
+
+static struct exynos_drm_display_ops vidi_display_ops = {
+ .type = EXYNOS_DISPLAY_TYPE_VIDI,
+ .is_connected = vidi_display_is_connected,
+ .get_edid = vidi_get_edid,
+ .get_panel = vidi_get_panel,
+ .check_timing = vidi_check_timing,
+ .power_on = vidi_display_power_on,
+};
+
+static void vidi_dpms(struct device *subdrv_dev, int mode)
+{
+ struct vidi_context *ctx = get_vidi_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ mutex_lock(&ctx->lock);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* TODO. */
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO. */
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&ctx->lock);
+}
+
+static void vidi_apply(struct device *subdrv_dev)
+{
+ struct vidi_context *ctx = get_vidi_context(subdrv_dev);
+ struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+ struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+ struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+ struct vidi_win_data *win_data;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+ ovl_ops->commit(subdrv_dev, i);
+ }
+
+ if (mgr_ops && mgr_ops->commit)
+ mgr_ops->commit(subdrv_dev);
+}
+
+static void vidi_commit(struct device *dev)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (ctx->suspended)
+ return;
+}
+
+static int vidi_enable_vblank(struct device *dev)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (ctx->suspended)
+ return -EPERM;
+
+ if (!test_and_set_bit(0, &ctx->irq_flags))
+ ctx->vblank_on = true;
+
+ return 0;
+}
+
+static void vidi_disable_vblank(struct device *dev)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (ctx->suspended)
+ return;
+
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ ctx->vblank_on = false;
+}
+
+static struct exynos_drm_manager_ops vidi_manager_ops = {
+ .dpms = vidi_dpms,
+ .apply = vidi_apply,
+ .commit = vidi_commit,
+ .enable_vblank = vidi_enable_vblank,
+ .disable_vblank = vidi_disable_vblank,
+};
+
+static void vidi_win_mode_set(struct device *dev,
+ struct exynos_drm_overlay *overlay)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_win_data *win_data;
+ int win;
+ unsigned long offset;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!overlay) {
+ dev_err(dev, "overlay is NULL\n");
+ return;
+ }
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ offset = overlay->fb_x * (overlay->bpp >> 3);
+ offset += overlay->fb_y * overlay->pitch;
+
+ DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+ win_data = &ctx->win_data[win];
+
+ win_data->offset_x = overlay->crtc_x;
+ win_data->offset_y = overlay->crtc_y;
+ win_data->ovl_width = overlay->crtc_width;
+ win_data->ovl_height = overlay->crtc_height;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+ win_data->dma_addr = overlay->dma_addr[0] + offset;
+ win_data->vaddr = overlay->vaddr[0] + offset;
+ win_data->bpp = overlay->bpp;
+ win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+ (overlay->bpp >> 3);
+ win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+
+ /*
+ * some parts of win_data should be transferred to user side
+ * through specific ioctl.
+ */
+
+ DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+ win_data->offset_x, win_data->offset_y);
+ DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ win_data->ovl_width, win_data->ovl_height);
+ DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
+ (unsigned long)win_data->dma_addr,
+ (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+ overlay->fb_width, overlay->crtc_width);
+}
+
+static void vidi_win_commit(struct device *dev, int zpos)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_win_data *win_data;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (ctx->suspended)
+ return;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+
+ win_data->enabled = true;
+
+ DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+
+ if (ctx->vblank_on)
+ schedule_work(&ctx->work);
+}
+
+static void vidi_win_disable(struct device *dev, int zpos)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+ struct vidi_win_data *win_data;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win >= WINDOWS_NR)
+ return;
+
+ win_data = &ctx->win_data[win];
+ win_data->enabled = false;
+
+ /* TODO. */
+}
+
+static struct exynos_drm_overlay_ops vidi_overlay_ops = {
+ .mode_set = vidi_win_mode_set,
+ .commit = vidi_win_commit,
+ .disable = vidi_win_disable,
+};
+
+static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ bool is_checked = false;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+ /* if event's pipe isn't same as crtc then ignore it. */
+ if (crtc != e->pipe)
+ continue;
+
+ is_checked = true;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ if (is_checked) {
+ /*
+ * call drm_vblank_put only in case that drm_vblank_get was
+ * called.
+ */
+ if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
+ drm_vblank_put(drm_dev, crtc);
+
+ /*
+ * don't turn off vblank if vblank_disable_allowed is 1,
+ * because vblank will be turned off by the timer handler.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, crtc);
+ }
+
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static void vidi_fake_vblank_handler(struct work_struct *work)
+{
+ struct vidi_context *ctx = container_of(work, struct vidi_context,
+ work);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+
+ if (manager->pipe < 0)
+ return;
+
+ /* emulate a refresh rate of roughly 50-60Hz. */
+ usleep_range(16000, 20000);
+
+ drm_handle_vblank(subdrv->drm_dev, manager->pipe);
+ vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
+}
+
+static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * enable drm irq mode.
+ * - with irq_enabled = 1, we can use the vblank feature.
+ *
+ * P.S. note that we don't use the drm irq handler; each specific
+ * driver uses its own handler instead, because the drm framework
+ * supports only one irq handler.
+ */
+ drm_dev->irq_enabled = 1;
+
+ /*
+ * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+ * by the drm timer once the current process gives up ownership of
+ * the vblank event (after the drm_vblank_put function is called).
+ */
+ drm_dev->vblank_disable_allowed = 1;
+
+ return 0;
+}
+
+static void vidi_subdrv_remove(struct drm_device *drm_dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* TODO. */
+}
+
+static int vidi_power_on(struct vidi_context *ctx, bool enable)
+{
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct device *dev = subdrv->manager.dev;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (enable != false && enable != true)
+ return -EINVAL;
+
+ if (enable) {
+ ctx->suspended = false;
+
+ /* if vblank was previously enabled, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ vidi_enable_vblank(dev);
+
+ vidi_apply(dev);
+ } else {
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static int vidi_show_connection(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rc;
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ mutex_lock(&ctx->lock);
+
+ rc = sprintf(buf, "%d\n", ctx->connected);
+
+ mutex_unlock(&ctx->lock);
+
+ return rc;
+}
+
+static int vidi_store_connection(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = kstrtoint(buf, 0, &ctx->connected);
+ if (ret)
+ return ret;
+
+ if (ctx->connected > 1)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("requested connection.\n");
+
+ drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
+
+ return len;
+}
+
+static DEVICE_ATTR(connection, 0644, vidi_show_connection,
+ vidi_store_connection);
+
+int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vidi_context *ctx = NULL;
+ struct drm_encoder *encoder;
+ struct exynos_drm_manager *manager;
+ struct exynos_drm_display_ops *display_ops;
+ struct drm_exynos_vidi_connection *vidi = data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (!vidi) {
+ DRM_DEBUG_KMS("user data for vidi is null.\n");
+ return -EINVAL;
+ }
+
+ if (!vidi->edid) {
+ DRM_DEBUG_KMS("edid data is null.\n");
+ return -EINVAL;
+ }
+
+ if (vidi->connection > 1) {
+ DRM_DEBUG_KMS("connection should be 0 or 1.\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
+ head) {
+ manager = exynos_drm_get_manager(encoder);
+ display_ops = manager->display_ops;
+
+ if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) {
+ ctx = get_vidi_context(manager->dev);
+ break;
+ }
+ }
+
+ if (!ctx) {
+ DRM_DEBUG_KMS("not found virtual device type encoder.\n");
+ return -EINVAL;
+ }
+
+ if (ctx->connected == vidi->connection) {
+ DRM_DEBUG_KMS("same connection request.\n");
+ return -EINVAL;
+ }
+
+ if (vidi->connection)
+ ctx->raw_edid = (struct edid *)vidi->edid;
+
+ ctx->connected = vidi->connection;
+ drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
+
+ return 0;
+}
+
+static int __devinit vidi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct vidi_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->default_win = 0;
+
+ INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
+
+ /* for test */
+ ctx->raw_edid = (struct edid *)fake_edid_info;
+
+ subdrv = &ctx->subdrv;
+ subdrv->probe = vidi_subdrv_probe;
+ subdrv->remove = vidi_subdrv_remove;
+ subdrv->manager.pipe = -1;
+ subdrv->manager.ops = &vidi_manager_ops;
+ subdrv->manager.overlay_ops = &vidi_overlay_ops;
+ subdrv->manager.display_ops = &vidi_display_ops;
+ subdrv->manager.dev = dev;
+
+ mutex_init(&ctx->lock);
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ if (ret < 0)
+ DRM_INFO("failed to create connection sysfs.\n");
+
+ exynos_drm_subdrv_register(subdrv);
+
+ return 0;
+}
+
+static int __devexit vidi_remove(struct platform_device *pdev)
+{
+ struct vidi_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vidi_suspend(struct device *dev)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ return vidi_power_on(ctx, false);
+}
+
+static int vidi_resume(struct device *dev)
+{
+ struct vidi_context *ctx = get_vidi_context(dev);
+
+ return vidi_power_on(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops vidi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume)
+};
+
+struct platform_driver vidi_driver = {
+ .probe = vidi_probe,
+ .remove = __devexit_p(vidi_remove),
+ .driver = {
+ .name = "exynos-drm-vidi",
+ .owner = THIS_MODULE,
+ .pm = &vidi_pm_ops,
+ },
+};
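For testing, a fake hotplug on the virtual display can be triggered either by writing 0 or 1 to the connection sysfs attribute created in vidi_probe(), or through the new EXYNOS_VIDI_CONNECTION ioctl handled by vidi_connection_ioctl(). A rough user-space sketch, assuming struct drm_exynos_vidi_connection exposes the connection and edid fields used by the handler above (edid as a 64-bit integer) and that the exynos uapi header defines DRM_IOCTL_EXYNOS_VIDI_CONNECTION:

    /* sketch only -- field layout and ioctl macro assumed, not verified */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/exynos_drm.h>

    static int vidi_fake_connect(int drm_fd, const void *edid_blob)
    {
            struct drm_exynos_vidi_connection vidi = {
                    .connection = 1,        /* 1 = connected, 0 = disconnected */
                    .edid = (uint64_t)(uintptr_t)edid_blob,
            };

            return ioctl(drm_fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &vidi);
    }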
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
new file mode 100644
index 0000000..a4babe4
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -0,0 +1,36 @@
+/* exynos_drm_vidi.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_VIDI_H_
+#define _EXYNOS_DRM_VIDI_H_
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv);
+#else
+#define vidi_connection_ioctl NULL
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 3429d3f..575a8cb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -41,44 +41,83 @@
#include "exynos_hdmi.h"
#define HDMI_OVERLAY_NUMBER 3
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
-static const u8 hdmiphy_conf27[32] = {
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *hdmiphy;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct fb_videomode *default_timing;
+ unsigned int is_v13:1;
+ unsigned int default_win;
+ unsigned int default_bpp;
+ bool hpd_handle;
+ bool enabled;
+
+ struct resource *regs_res;
+ void __iomem *regs;
+ unsigned int irq;
+ struct workqueue_struct *wq;
+ struct work_struct hotplug_work;
+
+ struct i2c_client *ddc_port;
+ struct i2c_client *hdmiphy_port;
+
+ /* current hdmiphy conf index */
+ int cur_conf;
+
+ struct hdmi_resources res;
+ void *parent_ctx;
+};
+
+/* HDMI Version 1.3 */
+static const u8 hdmiphy_v13_conf27[32] = {
0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
};
-static const u8 hdmiphy_conf27_027[32] = {
+static const u8 hdmiphy_v13_conf27_027[32] = {
0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
};
-static const u8 hdmiphy_conf74_175[32] = {
+static const u8 hdmiphy_v13_conf74_175[32] = {
0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
};
-static const u8 hdmiphy_conf74_25[32] = {
+static const u8 hdmiphy_v13_conf74_25[32] = {
0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
};
-static const u8 hdmiphy_conf148_5[32] = {
+static const u8 hdmiphy_v13_conf148_5[32] = {
0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
};
-struct hdmi_tg_regs {
+struct hdmi_v13_tg_regs {
u8 cmd;
u8 h_fsz_l;
u8 h_fsz_h;
@@ -110,7 +149,7 @@ struct hdmi_tg_regs {
u8 field_bot_hdmi_h;
};
-struct hdmi_core_regs {
+struct hdmi_v13_core_regs {
u8 h_blank[2];
u8 v_blank[3];
u8 h_v_line[3];
@@ -123,12 +162,21 @@ struct hdmi_core_regs {
u8 v_sync_gen3[3];
};
-struct hdmi_preset_conf {
- struct hdmi_core_regs core;
- struct hdmi_tg_regs tg;
+struct hdmi_v13_preset_conf {
+ struct hdmi_v13_core_regs core;
+ struct hdmi_v13_tg_regs tg;
};
-static const struct hdmi_preset_conf hdmi_conf_480p = {
+struct hdmi_v13_conf {
+ int width;
+ int height;
+ int vrefresh;
+ bool interlace;
+ const u8 *hdmiphy_data;
+ const struct hdmi_v13_preset_conf *conf;
+};
+
+static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = {
.core = {
.h_blank = {0x8a, 0x00},
.v_blank = {0x0d, 0x6a, 0x01},
@@ -154,7 +202,7 @@ static const struct hdmi_preset_conf hdmi_conf_480p = {
},
};
-static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = {
.core = {
.h_blank = {0x72, 0x01},
.v_blank = {0xee, 0xf2, 0x00},
@@ -182,7 +230,7 @@ static const struct hdmi_preset_conf hdmi_conf_720p60 = {
},
};
-static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = {
.core = {
.h_blank = {0xd0, 0x02},
.v_blank = {0x32, 0xB2, 0x00},
@@ -210,7 +258,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
},
};
-static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = {
.core = {
.h_blank = {0xd0, 0x02},
.v_blank = {0x65, 0x6c, 0x01},
@@ -238,7 +286,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
},
};
-static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = {
.core = {
.h_blank = {0x18, 0x01},
.v_blank = {0x32, 0xB2, 0x00},
@@ -266,7 +314,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
},
};
-static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
.core = {
.h_blank = {0x18, 0x01},
.v_blank = {0x65, 0x6c, 0x01},
@@ -294,13 +342,530 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
},
};
+static const struct hdmi_v13_conf hdmi_v13_confs[] = {
+ { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
+};
+
+/* HDMI Version 1.4 */
+static const u8 hdmiphy_conf27_027[32] = {
+ 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+ 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+};
+
+struct hdmi_tg_regs {
+ u8 cmd;
+ u8 h_fsz_l;
+ u8 h_fsz_h;
+ u8 hact_st_l;
+ u8 hact_st_h;
+ u8 hact_sz_l;
+ u8 hact_sz_h;
+ u8 v_fsz_l;
+ u8 v_fsz_h;
+ u8 vsync_l;
+ u8 vsync_h;
+ u8 vsync2_l;
+ u8 vsync2_h;
+ u8 vact_st_l;
+ u8 vact_st_h;
+ u8 vact_sz_l;
+ u8 vact_sz_h;
+ u8 field_chg_l;
+ u8 field_chg_h;
+ u8 vact_st2_l;
+ u8 vact_st2_h;
+ u8 vact_st3_l;
+ u8 vact_st3_h;
+ u8 vact_st4_l;
+ u8 vact_st4_h;
+ u8 vsync_top_hdmi_l;
+ u8 vsync_top_hdmi_h;
+ u8 vsync_bot_hdmi_l;
+ u8 vsync_bot_hdmi_h;
+ u8 field_top_hdmi_l;
+ u8 field_top_hdmi_h;
+ u8 field_bot_hdmi_l;
+ u8 field_bot_hdmi_h;
+ u8 tg_3d;
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v2_blank[2];
+ u8 v1_blank[2];
+ u8 v_line[2];
+ u8 h_line[2];
+ u8 hsync_pol[1];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f0[2];
+ u8 v_blank_f1[2];
+ u8 h_sync_start[2];
+ u8 h_sync_end[2];
+ u8 v_sync_line_bef_2[2];
+ u8 v_sync_line_bef_1[2];
+ u8 v_sync_line_aft_2[2];
+ u8 v_sync_line_aft_1[2];
+ u8 v_sync_line_aft_pxl_2[2];
+ u8 v_sync_line_aft_pxl_1[2];
+ u8 v_blank_f2[2]; /* for 3D mode */
+ u8 v_blank_f3[2]; /* for 3D mode */
+ u8 v_blank_f4[2]; /* for 3D mode */
+ u8 v_blank_f5[2]; /* for 3D mode */
+ u8 v_sync_line_aft_3[2];
+ u8 v_sync_line_aft_4[2];
+ u8 v_sync_line_aft_5[2];
+ u8 v_sync_line_aft_6[2];
+ u8 v_sync_line_aft_pxl_3[2];
+ u8 v_sync_line_aft_pxl_4[2];
+ u8 v_sync_line_aft_pxl_5[2];
+ u8 v_sync_line_aft_pxl_6[2];
+ u8 vact_space_1[2];
+ u8 vact_space_2[2];
+ u8 vact_space_3[2];
+ u8 vact_space_4[2];
+ u8 vact_space_5[2];
+ u8 vact_space_6[2];
+};
+
+struct hdmi_preset_conf {
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
+struct hdmi_conf {
+ int width;
+ int height;
+ int vrefresh;
+ bool interlace;
+ const u8 *hdmiphy_data;
+ const struct hdmi_preset_conf *conf;
+};
+
+static const struct hdmi_preset_conf hdmi_conf_480p60 = {
+ .core = {
+ .h_blank = {0x8a, 0x00},
+ .v2_blank = {0x0d, 0x02},
+ .v1_blank = {0x2d, 0x00},
+ .v_line = {0x0d, 0x02},
+ .h_line = {0x5a, 0x03},
+ .hsync_pol = {0x01},
+ .vsync_pol = {0x01},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x0e, 0x00},
+ .h_sync_end = {0x4c, 0x00},
+ .v_sync_line_bef_2 = {0x0f, 0x00},
+ .v_sync_line_bef_1 = {0x09, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x5a, 0x03, /* h_fsz */
+ 0x8a, 0x00, 0xd0, 0x02, /* hact */
+ 0x0d, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0xe0, 0x01, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p50 = {
+ .core = {
+ .h_blank = {0xbc, 0x02},
+ .v2_blank = {0xee, 0x02},
+ .v1_blank = {0x1e, 0x00},
+ .v_line = {0xee, 0x02},
+ .h_line = {0xbc, 0x07},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0xb6, 0x01},
+ .h_sync_end = {0xde, 0x01},
+ .v_sync_line_bef_2 = {0x0a, 0x00},
+ .v_sync_line_bef_1 = {0x05, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0xbc, 0x07, /* h_fsz */
+ 0xbc, 0x02, 0x00, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+ .core = {
+ .h_blank = {0x72, 0x01},
+ .v2_blank = {0xee, 0x02},
+ .v1_blank = {0x1e, 0x00},
+ .v_line = {0xee, 0x02},
+ .h_line = {0x72, 0x06},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x6c, 0x00},
+ .h_sync_end = {0x94, 0x00},
+ .v_sync_line_bef_2 = {0x0a, 0x00},
+ .v_sync_line_bef_1 = {0x05, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x72, 0x06, /* h_fsz */
+ 0x72, 0x01, 0x00, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v2_blank = {0x32, 0x02},
+ .v1_blank = {0x16, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x50, 0x0a},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f0 = {0x49, 0x02},
+ .v_blank_f1 = {0x65, 0x04},
+ .h_sync_start = {0x0e, 0x02},
+ .h_sync_end = {0x3a, 0x02},
+ .v_sync_line_bef_2 = {0x07, 0x00},
+ .v_sync_line_bef_1 = {0x02, 0x00},
+ .v_sync_line_aft_2 = {0x39, 0x02},
+ .v_sync_line_aft_1 = {0x34, 0x02},
+ .v_sync_line_aft_pxl_2 = {0x38, 0x07},
+ .v_sync_line_aft_pxl_1 = {0x38, 0x07},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0a, /* h_fsz */
+ 0xd0, 0x02, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v2_blank = {0x32, 0x02},
+ .v1_blank = {0x16, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x98, 0x08},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f0 = {0x49, 0x02},
+ .v_blank_f1 = {0x65, 0x04},
+ .h_sync_start = {0x56, 0x00},
+ .h_sync_end = {0x82, 0x00},
+ .v_sync_line_bef_2 = {0x07, 0x00},
+ .v_sync_line_bef_1 = {0x02, 0x00},
+ .v_sync_line_aft_2 = {0x39, 0x02},
+ .v_sync_line_aft_1 = {0x34, 0x02},
+ .v_sync_line_aft_pxl_2 = {0xa4, 0x04},
+ .v_sync_line_aft_pxl_1 = {0xa4, 0x04},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v2_blank = {0x65, 0x04},
+ .v1_blank = {0x2d, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x50, 0x0a},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x0e, 0x02},
+ .h_sync_end = {0x3a, 0x02},
+ .v_sync_line_bef_2 = {0x09, 0x00},
+ .v_sync_line_bef_1 = {0x04, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0a, /* h_fsz */
+ 0xd0, 0x02, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v2_blank = {0x65, 0x04},
+ .v1_blank = {0x2d, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x98, 0x08},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x56, 0x00},
+ .h_sync_end = {0x82, 0x00},
+ .v_sync_line_bef_2 = {0x09, 0x00},
+ .v_sync_line_bef_1 = {0x04, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
static const struct hdmi_conf hdmi_confs[] = {
+ { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
@@ -324,7 +889,7 @@ static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
writel(value, hdata->regs + reg_id);
}
-static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
{
#define DUMPREG(reg_id) \
DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
@@ -333,6 +898,101 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
DUMPREG(HDMI_INTC_FLAG);
DUMPREG(HDMI_INTC_CON);
DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_V13_PHY_RSTOUT);
+ DUMPREG(HDMI_V13_PHY_VPLL);
+ DUMPREG(HDMI_V13_PHY_CMU);
+ DUMPREG(HDMI_V13_CORE_RSTOUT);
+
+ DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_V13_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_V13_HPD_GEN);
+ DUMPREG(HDMI_V13_DC_CONTROL);
+ DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
+
+ DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V13_V_BLANK_0);
+ DUMPREG(HDMI_V13_V_BLANK_1);
+ DUMPREG(HDMI_V13_V_BLANK_2);
+ DUMPREG(HDMI_V13_H_V_LINE_0);
+ DUMPREG(HDMI_V13_H_V_LINE_1);
+ DUMPREG(HDMI_V13_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V13_V_BLANK_F_0);
+ DUMPREG(HDMI_V13_V_BLANK_F_1);
+ DUMPREG(HDMI_V13_V_BLANK_F_2);
+ DUMPREG(HDMI_V13_H_SYNC_GEN_0);
+ DUMPREG(HDMI_V13_H_SYNC_GEN_1);
+ DUMPREG(HDMI_V13_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
+
+ DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+ int i;
+
+#define DUMPREG(reg_id) \
+ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdata->regs + reg_id))
+
+ DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_INTC_CON_1);
+ DUMPREG(HDMI_INTC_FLAG_1);
+ DUMPREG(HDMI_PHY_STATUS_0);
+ DUMPREG(HDMI_PHY_STATUS_PLL);
+ DUMPREG(HDMI_PHY_CON_0);
DUMPREG(HDMI_PHY_RSTOUT);
DUMPREG(HDMI_PHY_VPLL);
DUMPREG(HDMI_PHY_CMU);
@@ -343,40 +1003,93 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
DUMPREG(HDMI_CON_1);
DUMPREG(HDMI_CON_2);
DUMPREG(HDMI_SYS_STATUS);
- DUMPREG(HDMI_PHY_STATUS);
+ DUMPREG(HDMI_PHY_STATUS_0);
DUMPREG(HDMI_STATUS_EN);
DUMPREG(HDMI_HPD);
DUMPREG(HDMI_MODE_SEL);
- DUMPREG(HDMI_HPD_GEN);
+ DUMPREG(HDMI_ENC_EN);
DUMPREG(HDMI_DC_CONTROL);
DUMPREG(HDMI_VIDEO_PATTERN_GEN);
DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
DUMPREG(HDMI_H_BLANK_0);
DUMPREG(HDMI_H_BLANK_1);
- DUMPREG(HDMI_V_BLANK_0);
- DUMPREG(HDMI_V_BLANK_1);
- DUMPREG(HDMI_V_BLANK_2);
- DUMPREG(HDMI_H_V_LINE_0);
- DUMPREG(HDMI_H_V_LINE_1);
- DUMPREG(HDMI_H_V_LINE_2);
+ DUMPREG(HDMI_V2_BLANK_0);
+ DUMPREG(HDMI_V2_BLANK_1);
+ DUMPREG(HDMI_V1_BLANK_0);
+ DUMPREG(HDMI_V1_BLANK_1);
+ DUMPREG(HDMI_V_LINE_0);
+ DUMPREG(HDMI_V_LINE_1);
+ DUMPREG(HDMI_H_LINE_0);
+ DUMPREG(HDMI_H_LINE_1);
+ DUMPREG(HDMI_HSYNC_POL);
+
DUMPREG(HDMI_VSYNC_POL);
DUMPREG(HDMI_INT_PRO_MODE);
- DUMPREG(HDMI_V_BLANK_F_0);
- DUMPREG(HDMI_V_BLANK_F_1);
- DUMPREG(HDMI_V_BLANK_F_2);
- DUMPREG(HDMI_H_SYNC_GEN_0);
- DUMPREG(HDMI_H_SYNC_GEN_1);
- DUMPREG(HDMI_H_SYNC_GEN_2);
- DUMPREG(HDMI_V_SYNC_GEN_1_0);
- DUMPREG(HDMI_V_SYNC_GEN_1_1);
- DUMPREG(HDMI_V_SYNC_GEN_1_2);
- DUMPREG(HDMI_V_SYNC_GEN_2_0);
- DUMPREG(HDMI_V_SYNC_GEN_2_1);
- DUMPREG(HDMI_V_SYNC_GEN_2_2);
- DUMPREG(HDMI_V_SYNC_GEN_3_0);
- DUMPREG(HDMI_V_SYNC_GEN_3_1);
- DUMPREG(HDMI_V_SYNC_GEN_3_2);
+ DUMPREG(HDMI_V_BLANK_F0_0);
+ DUMPREG(HDMI_V_BLANK_F0_1);
+ DUMPREG(HDMI_V_BLANK_F1_0);
+ DUMPREG(HDMI_V_BLANK_F1_1);
+
+ DUMPREG(HDMI_H_SYNC_START_0);
+ DUMPREG(HDMI_H_SYNC_START_1);
+ DUMPREG(HDMI_H_SYNC_END_0);
+ DUMPREG(HDMI_H_SYNC_END_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
+ DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
+
+ DUMPREG(HDMI_V_BLANK_F2_0);
+ DUMPREG(HDMI_V_BLANK_F2_1);
+ DUMPREG(HDMI_V_BLANK_F3_0);
+ DUMPREG(HDMI_V_BLANK_F3_1);
+ DUMPREG(HDMI_V_BLANK_F4_0);
+ DUMPREG(HDMI_V_BLANK_F4_1);
+ DUMPREG(HDMI_V_BLANK_F5_0);
+ DUMPREG(HDMI_V_BLANK_F5_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
+
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
+ DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
+
+ DUMPREG(HDMI_VACT_SPACE_1_0);
+ DUMPREG(HDMI_VACT_SPACE_1_1);
+ DUMPREG(HDMI_VACT_SPACE_2_0);
+ DUMPREG(HDMI_VACT_SPACE_2_1);
+ DUMPREG(HDMI_VACT_SPACE_3_0);
+ DUMPREG(HDMI_VACT_SPACE_3_1);
+ DUMPREG(HDMI_VACT_SPACE_4_0);
+ DUMPREG(HDMI_VACT_SPACE_4_1);
+ DUMPREG(HDMI_VACT_SPACE_5_0);
+ DUMPREG(HDMI_VACT_SPACE_5_1);
+ DUMPREG(HDMI_VACT_SPACE_6_0);
+ DUMPREG(HDMI_VACT_SPACE_6_1);
DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
DUMPREG(HDMI_TG_CMD);
@@ -400,6 +1113,10 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
DUMPREG(HDMI_TG_FIELD_CHG_H);
DUMPREG(HDMI_TG_VACT_ST2_L);
DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VACT_ST3_L);
+ DUMPREG(HDMI_TG_VACT_ST3_H);
+ DUMPREG(HDMI_TG_VACT_ST4_L);
+ DUMPREG(HDMI_TG_VACT_ST4_H);
DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
@@ -408,10 +1125,49 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_3D);
+
+ DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_AVI_CON);
+ DUMPREG(HDMI_AVI_HEADER0);
+ DUMPREG(HDMI_AVI_HEADER1);
+ DUMPREG(HDMI_AVI_HEADER2);
+ DUMPREG(HDMI_AVI_CHECK_SUM);
+ DUMPREG(HDMI_VSI_CON);
+ DUMPREG(HDMI_VSI_HEADER0);
+ DUMPREG(HDMI_VSI_HEADER1);
+ DUMPREG(HDMI_VSI_HEADER2);
+ for (i = 0; i < 7; ++i)
+ DUMPREG(HDMI_VSI_DATA(i));
+
#undef DUMPREG
}
-static int hdmi_conf_index(struct drm_display_mode *mode)
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+ if (hdata->is_v13)
+ hdmi_v13_regs_dump(hdata, prefix);
+ else
+ hdmi_v14_regs_dump(hdata, prefix);
+}
+
+static int hdmi_v13_conf_index(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
+ if (hdmi_v13_confs[i].width == mode->hdisplay &&
+ hdmi_v13_confs[i].height == mode->vdisplay &&
+ hdmi_v13_confs[i].vrefresh == mode->vrefresh &&
+ hdmi_v13_confs[i].interlace ==
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+ true : false))
+ return i;
+
+ return -EINVAL;
+}
+
+static int hdmi_v14_conf_index(struct drm_display_mode *mode)
{
int i;
@@ -424,7 +1180,16 @@ static int hdmi_conf_index(struct drm_display_mode *mode)
true : false))
return i;
- return -1;
+ return -EINVAL;
+}
+
+static int hdmi_conf_index(struct hdmi_context *hdata,
+ struct drm_display_mode *mode)
+{
+ if (hdata->is_v13)
+ return hdmi_v13_conf_index(mode);
+
+ return hdmi_v14_conf_index(mode);
}
static bool hdmi_is_connected(void *ctx)
@@ -462,29 +1227,69 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
return 0;
}
-static int hdmi_check_timing(void *ctx, void *timing)
+static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
{
- struct fb_videomode *check_timing = timing;
int i;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("mode to check : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ check_timing->xres, check_timing->yres,
+ check_timing->refresh, (check_timing->vmode &
+ FB_VMODE_INTERLACED) ? true : false);
- DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
- check_timing->yres, check_timing->refresh,
- check_timing->vmode);
+ for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
+ if (hdmi_v13_confs[i].width == check_timing->xres &&
+ hdmi_v13_confs[i].height == check_timing->yres &&
+ hdmi_v13_confs[i].vrefresh == check_timing->refresh &&
+ hdmi_v13_confs[i].interlace ==
+ ((check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false))
+ return 0;
- for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ /* TODO */
+
+ return -EINVAL;
+}
+
+static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
+{
+ int i;
+
+ DRM_DEBUG_KMS("mode to check : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ check_timing->xres, check_timing->yres,
+ check_timing->refresh, (check_timing->vmode &
+ FB_VMODE_INTERLACED) ? true : false);
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++)
if (hdmi_confs[i].width == check_timing->xres &&
hdmi_confs[i].height == check_timing->yres &&
hdmi_confs[i].vrefresh == check_timing->refresh &&
hdmi_confs[i].interlace ==
((check_timing->vmode & FB_VMODE_INTERLACED) ?
true : false))
- return 0;
+ return 0;
+
+ /* TODO */
return -EINVAL;
}
+static int hdmi_check_timing(void *ctx, void *timing)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct fb_videomode *check_timing = timing;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
+ check_timing->yres, check_timing->refresh,
+ check_timing->vmode);
+
+ if (hdata->is_v13)
+ return hdmi_v13_check_timing(check_timing);
+ else
+ return hdmi_v14_check_timing(check_timing);
+}
+
static int hdmi_display_power_on(void *ctx, int mode)
{
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -514,15 +1319,185 @@ static struct exynos_hdmi_display_ops display_ops = {
.power_on = hdmi_display_power_on,
};
+static void hdmi_set_acr(u32 freq, u8 *acr)
+{
+ u32 n, cts;
+
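+ /* N/CTS pairs below appear to follow the HDMI spec recommended values for a 27 MHz TMDS clock */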
+ switch (freq) {
+ case 32000:
+ n = 4096;
+ cts = 27000;
+ break;
+ case 44100:
+ n = 6272;
+ cts = 30000;
+ break;
+ case 88200:
+ n = 12544;
+ cts = 30000;
+ break;
+ case 176400:
+ n = 25088;
+ cts = 30000;
+ break;
+ case 48000:
+ n = 6144;
+ cts = 27000;
+ break;
+ case 96000:
+ n = 12288;
+ cts = 27000;
+ break;
+ case 192000:
+ n = 24576;
+ cts = 27000;
+ break;
+ default:
+ n = 0;
+ cts = 0;
+ break;
+ }
+
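+ /* acr[1..3] carry CTS and acr[4..6] carry N, both MSB first; acr[0] is left unused here */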
+ acr[1] = cts >> 16;
+ acr[2] = cts >> 8 & 0xff;
+ acr[3] = cts & 0xff;
+
+ acr[4] = n >> 16;
+ acr[5] = n >> 8 & 0xff;
+ acr[6] = n & 0xff;
+}
+
+static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
+{
+ hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
+
+ if (hdata->is_v13)
+ hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
+ else
+ hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
+}
+
+static void hdmi_audio_init(struct hdmi_context *hdata)
+{
+ u32 sample_rate, bits_per_sample, frame_size_code;
+ u32 data_num, bit_ch, sample_frq;
+ u32 val;
+ u8 acr[7];
+
+ sample_rate = 44100;
+ bits_per_sample = 16;
+ frame_size_code = 0;
+
+ switch (bits_per_sample) {
+ case 20:
+ data_num = 2;
+ bit_ch = 1;
+ break;
+ case 24:
+ data_num = 3;
+ bit_ch = 1;
+ break;
+ default:
+ data_num = 1;
+ bit_ch = 0;
+ break;
+ }
+
+ hdmi_set_acr(sample_rate, acr);
+ hdmi_reg_acr(hdata, acr);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
+ | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
+ | HDMI_I2S_MUX_ENABLE);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN
+ | HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
+
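+ /* sampling frequency code, apparently as defined by the IEC 60958 channel status bits */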
+ sample_frq = (sample_rate == 44100) ? 0 :
+ (sample_rate == 48000) ? 2 :
+ (sample_rate == 32000) ? 3 :
+ (sample_rate == 96000) ? 0xa : 0x0;
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
+
+ val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01;
+ hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val);
+
+ /* Configure the I2S input ports (I2S_PIN_SEL_0~4) */
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
+ | HDMI_I2S_SEL_LRCK(6));
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
+ | HDMI_I2S_SEL_SDATA2(4));
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
+ | HDMI_I2S_SEL_SDATA2(2));
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
+
+ /* I2S_CON_1 & 2 */
+ hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE
+ | HDMI_I2S_L_CH_LOW_POL);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE
+ | HDMI_I2S_SET_BIT_CH(bit_ch)
+ | HDMI_I2S_SET_SDATA_BIT(data_num)
+ | HDMI_I2S_BASIC_FORMAT);
+
+ /* Configure register related to CUV information */
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
+ | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
+ | HDMI_I2S_COPYRIGHT
+ | HDMI_I2S_LINEAR_PCM
+ | HDMI_I2S_CONSUMER_FORMAT);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
+ | HDMI_I2S_SET_SMP_FREQ(sample_frq));
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
+ HDMI_I2S_ORG_SMP_FREQ_44_1
+ | HDMI_I2S_WORD_LEN_MAX24_24BITS
+ | HDMI_I2S_WORD_LEN_MAX_24BITS);
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
+}
+
+static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
+{
+ u32 mod;
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
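+ /* audio packets are not sent in DVI mode */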
+ if (mod & HDMI_DVI_MODE_EN)
+ return;
+
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
+ hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
+ HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
+}
+
static void hdmi_conf_reset(struct hdmi_context *hdata)
{
+ u32 reg;
+
/* disable hpd handle for drm */
hdata->hpd_handle = false;
+ if (hdata->is_v13)
+ reg = HDMI_V13_CORE_RSTOUT;
+ else
+ reg = HDMI_CORE_RSTOUT;
+
/* resetting HDMI core */
- hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
+ hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
mdelay(10);
- hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+ hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
mdelay(10);
/* enable hpd handle for drm */
@@ -546,27 +1521,126 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
- /* choose bluescreen (fecal) color */
- hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
- hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
- hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
- /* enable AVI packet every vsync, fixes purple line problem */
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
- /* force RGB, look to CEA-861-D, table 7 for more detail */
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
- hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
-
- hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
+
+ if (hdata->is_v13) {
+ /* choose bluescreen (fecal) color */
+ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
+ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
+ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56);
+
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02);
+ /* force RGB, look to CEA-861-D, table 7 for more detail */
+ hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5);
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+ hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
+ } else {
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
+ }
/* enable hpd handle for drm */
hdata->hpd_handle = true;
}
-static void hdmi_timing_apply(struct hdmi_context *hdata,
- const struct hdmi_preset_conf *conf)
+static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
{
+ const struct hdmi_v13_preset_conf *conf =
+ hdmi_v13_confs[hdata->cur_conf].conf;
+ const struct hdmi_v13_core_regs *core = &conf->core;
+ const struct hdmi_v13_tg_regs *tg = &conf->tg;
+ int tries;
+
+ /* setting core registers */
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+ u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
+ if (val & HDMI_PHY_STATUS_READY)
+ break;
+ mdelay(1);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+ hdmi_regs_dump(hdata, "timing apply");
+ }
+
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+ if (core->int_pro_mode[0])
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+ HDMI_FIELD_EN);
+ else
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
+{
+ const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf;
const struct hdmi_core_regs *core = &conf->core;
const struct hdmi_tg_regs *tg = &conf->tg;
int tries;
@@ -574,29 +1648,102 @@ static void hdmi_timing_apply(struct hdmi_context *hdata,
/* setting core registers */
hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
- hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
- hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
- hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
- hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
- hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
- hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
+ core->v_sync_line_bef_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
+ core->v_sync_line_bef_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
+ core->v_sync_line_bef_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
+ core->v_sync_line_bef_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
+ core->v_sync_line_aft_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
+ core->v_sync_line_aft_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
+ core->v_sync_line_aft_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
+ core->v_sync_line_aft_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
+ core->v_sync_line_aft_pxl_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
+ core->v_sync_line_aft_pxl_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
+ core->v_sync_line_aft_pxl_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
+ core->v_sync_line_aft_pxl_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
+ core->v_sync_line_aft_3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
+ core->v_sync_line_aft_3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
+ core->v_sync_line_aft_4[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
+ core->v_sync_line_aft_4[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
+ core->v_sync_line_aft_5[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
+ core->v_sync_line_aft_5[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
+ core->v_sync_line_aft_6[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
+ core->v_sync_line_aft_6[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
+ core->v_sync_line_aft_pxl_3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
+ core->v_sync_line_aft_pxl_3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
+ core->v_sync_line_aft_pxl_4[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
+ core->v_sync_line_aft_pxl_4[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
+ core->v_sync_line_aft_pxl_5[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
+ core->v_sync_line_aft_pxl_5[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
+ core->v_sync_line_aft_pxl_6[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
+ core->v_sync_line_aft_pxl_6[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
+ hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+
/* Timing generator registers */
hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
@@ -618,6 +1765,10 @@ static void hdmi_timing_apply(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h);
hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
@@ -626,10 +1777,11 @@ static void hdmi_timing_apply(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d);
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
- u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
+ u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
if (val & HDMI_PHY_STATUS_READY)
break;
mdelay(1);
@@ -653,9 +1805,18 @@ static void hdmi_timing_apply(struct hdmi_context *hdata,
hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
}
+static void hdmi_timing_apply(struct hdmi_context *hdata)
+{
+ if (hdata->is_v13)
+ hdmi_v13_timing_apply(hdata);
+ else
+ hdmi_v14_timing_apply(hdata);
+}
+
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
u8 buffer[2];
+ u32 reg;
clk_disable(hdata->res.sclk_hdmi);
clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
@@ -668,15 +1829,21 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
if (hdata->hdmiphy_port)
i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ if (hdata->is_v13)
+ reg = HDMI_V13_PHY_RSTOUT;
+ else
+ reg = HDMI_PHY_RSTOUT;
+
/* reset hdmiphy */
- hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+ hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
mdelay(10);
- hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT);
mdelay(10);
}
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
+ const u8 *hdmiphy_data;
u8 buffer[32];
u8 operation[2];
u8 read_buffer[32] = {0, };
@@ -689,7 +1856,12 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
}
/* pixel clock */
- memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
+ if (hdata->is_v13)
+ hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
+ else
+ hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data;
+
+ memcpy(buffer, hdmiphy_data, 32);
ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
if (ret != 32) {
DRM_ERROR("failed to configure HDMIPHY via I2C\n");
@@ -721,9 +1893,6 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
static void hdmi_conf_apply(struct hdmi_context *hdata)
{
- const struct hdmi_preset_conf *conf =
- hdmi_confs[hdata->cur_conf].conf;
-
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmiphy_conf_reset(hdata);
@@ -731,13 +1900,55 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
+ hdmi_audio_init(hdata);
/* setting core registers */
- hdmi_timing_apply(hdata, conf);
+ hdmi_timing_apply(hdata);
+ hdmi_audio_control(hdata, true);
hdmi_regs_dump(hdata, "start");
}
+static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_display_mode *m;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ int index;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ if (hdata->is_v13)
+ index = hdmi_v13_conf_index(adjusted_mode);
+ else
+ index = hdmi_v14_conf_index(adjusted_mode);
+
+ /* just return if the user-desired mode is supported. */
+ if (index >= 0)
+ return;
+
+ /*
+ * otherwise, find the most suitable supported mode from the connector's
+ * mode list and use it as adjusted_mode.
+ */
+ list_for_each_entry(m, &connector->modes, head) {
+ if (hdata->is_v13)
+ index = hdmi_v13_conf_index(m);
+ else
+ index = hdmi_v14_conf_index(m);
+
+ if (index >= 0) {
+ DRM_INFO("desired mode doesn't exist; using the most suitable supported mode instead.\n");
+ memcpy(adjusted_mode, m, sizeof(*m));
+ break;
+ }
+ }
+}
+
static void hdmi_mode_set(void *ctx, void *mode)
{
struct hdmi_context *hdata = (struct hdmi_context *)ctx;
@@ -745,13 +1956,22 @@ static void hdmi_mode_set(void *ctx, void *mode)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- conf_idx = hdmi_conf_index(mode);
- if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
+ conf_idx = hdmi_conf_index(hdata, mode);
+ if (conf_idx >= 0)
hdata->cur_conf = conf_idx;
else
DRM_DEBUG_KMS("not supported mode\n");
}
+static void hdmi_get_max_resol(void *ctx, unsigned int *width,
+ unsigned int *height)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ *width = MAX_WIDTH;
+ *height = MAX_HEIGHT;
+}
+
static void hdmi_commit(void *ctx)
{
struct hdmi_context *hdata = (struct hdmi_context *)ctx;
@@ -770,13 +1990,16 @@ static void hdmi_disable(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (hdata->enabled) {
+ hdmi_audio_control(hdata, false);
hdmiphy_conf_reset(hdata);
hdmi_conf_reset(hdata);
}
}
static struct exynos_hdmi_manager_ops manager_ops = {
+ .mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
+ .get_max_resol = hdmi_get_max_resol,
.commit = hdmi_commit,
.disable = hdmi_disable,
};
@@ -926,7 +2149,7 @@ static void hdmi_resource_poweron(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
-
+ hdmi_audio_init(hdata);
}
static void hdmi_resource_poweroff(struct hdmi_context *hdata)
@@ -978,14 +2201,12 @@ void hdmi_attach_ddc_client(struct i2c_client *ddc)
if (ddc)
hdmi_ddc = ddc;
}
-EXPORT_SYMBOL(hdmi_attach_ddc_client);
void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
{
if (hdmiphy)
hdmi_hdmiphy = hdmiphy;
}
-EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
static int __devinit hdmi_probe(struct platform_device *pdev)
{
@@ -1022,6 +2243,7 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm_hdmi_ctx);
+ hdata->is_v13 = pdata->is_v13;
hdata->default_win = pdata->default_win;
hdata->default_timing = &pdata->timing;
hdata->default_bpp = pdata->bpp;
@@ -1167,10 +2389,3 @@ struct platform_driver hdmi_driver = {
.pm = &hdmi_pm_ops,
},
};
-EXPORT_SYMBOL(hdmi_driver);
-
-MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
index 31d6cf8..1c3b6d8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -28,56 +28,6 @@
#ifndef _EXYNOS_HDMI_H_
#define _EXYNOS_HDMI_H_
-struct hdmi_conf {
- int width;
- int height;
- int vrefresh;
- bool interlace;
- const u8 *hdmiphy_data;
- const struct hdmi_preset_conf *conf;
-};
-
-struct hdmi_resources {
- struct clk *hdmi;
- struct clk *sclk_hdmi;
- struct clk *sclk_pixel;
- struct clk *sclk_hdmiphy;
- struct clk *hdmiphy;
- struct regulator_bulk_data *regul_bulk;
- int regul_count;
-};
-
-struct hdmi_context {
- struct device *dev;
- struct drm_device *drm_dev;
- struct fb_videomode *default_timing;
- unsigned int default_win;
- unsigned int default_bpp;
- bool hpd_handle;
- bool enabled;
-
- struct resource *regs_res;
- /** base address of HDMI registers */
- void __iomem *regs;
- /** HDMI hotplug interrupt */
- unsigned int irq;
- /** workqueue for delayed work */
- struct workqueue_struct *wq;
- /** hotplug handling work */
- struct work_struct hotplug_work;
-
- struct i2c_client *ddc_port;
- struct i2c_client *hdmiphy_port;
-
- /** current hdmiphy conf index */
- int cur_conf;
- /** other resources */
- struct hdmi_resources res;
-
- void *parent_ctx;
-};
-
-
void hdmi_attach_ddc_client(struct i2c_client *ddc);
void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 93846e8..4d5f41e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,11 +36,57 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
-#include "exynos_hdmi.h"
-#include "exynos_mixer.h"
+
+#define HDMI_OVERLAY_NUMBER 3
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+struct hdmi_win_data {
+ dma_addr_t dma_addr;
+ void __iomem *vaddr;
+ dma_addr_t chroma_dma_addr;
+ void __iomem *chroma_vaddr;
+ uint32_t pixel_format;
+ unsigned int bpp;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int scan_flags;
+};
+
+struct mixer_resources {
+ struct device *dev;
+ int irq;
+ void __iomem *mixer_regs;
+ void __iomem *vp_regs;
+ spinlock_t reg_slock;
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+struct mixer_context {
+ struct fb_videomode *default_timing;
+ unsigned int default_win;
+ unsigned int default_bpp;
+ unsigned int irq;
+ int pipe;
+ bool interlace;
+ bool vp_enabled;
+
+ struct mixer_resources mixer_res;
+ struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
+};
+
static const u8 filter_y_horiz_tap8[] = {
0, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, 0, 0, 0,
@@ -1066,10 +1112,3 @@ struct platform_driver mixer_driver = {
.probe = mixer_probe,
.remove = __devexit_p(mixer_remove),
};
-EXPORT_SYMBOL(mixer_driver);
-
-MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
deleted file mode 100644
index cebacfe..0000000
--- a/drivers/gpu/drm/exynos/exynos_mixer.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- * Inki Dae <inki.dae@samsung.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _EXYNOS_MIXER_H_
-#define _EXYNOS_MIXER_H_
-
-#define HDMI_OVERLAY_NUMBER 3
-
-struct hdmi_win_data {
- dma_addr_t dma_addr;
- void __iomem *vaddr;
- dma_addr_t chroma_dma_addr;
- void __iomem *chroma_vaddr;
- uint32_t pixel_format;
- unsigned int bpp;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_width;
- unsigned int crtc_height;
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int mode_width;
- unsigned int mode_height;
- unsigned int scan_flags;
-};
-
-struct mixer_resources {
- struct device *dev;
- /** interrupt index */
- int irq;
- /** pointer to Mixer registers */
- void __iomem *mixer_regs;
- /** pointer to Video Processor registers */
- void __iomem *vp_regs;
- /** spinlock for protection of registers */
- spinlock_t reg_slock;
- /** other resources */
- struct clk *mixer;
- struct clk *vp;
- struct clk *sclk_mixer;
- struct clk *sclk_hdmi;
- struct clk *sclk_dac;
-};
-
-struct mixer_context {
- unsigned int default_win;
- struct fb_videomode *default_timing;
- unsigned int default_bpp;
-
- /** mixer interrupt */
- unsigned int irq;
- /** current crtc pipe for vblank */
- int pipe;
- /** interlace scan mode */
- bool interlace;
- /** vp enabled status */
- bool vp_enabled;
-
- /** mixer and vp resources */
- struct mixer_resources mixer_res;
-
- /** overlay window data */
- struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
-};
-
-#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 72e6b52..3c04bea 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -19,64 +19,67 @@
* Register part
*/
+/* HDMI Version 1.3 & Common */
#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_I2S_BASE(x) ((x) + 0x00040000)
#define HDMI_TG_BASE(x) ((x) + 0x00050000)
/* Control registers */
#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
-#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
-#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
-#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
-#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+#define HDMI_V13_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_V13_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_V13_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_V13_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
/* Core registers */
#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
-#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_V13_PHY_STATUS HDMI_CORE_BASE(0x0014)
#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
#define HDMI_HPD HDMI_CORE_BASE(0x0030)
#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
-#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
-#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
-#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_ENC_EN HDMI_CORE_BASE(0x0044)
+#define HDMI_V13_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_V13_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_V13_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
-#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
-#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
-#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
-#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
-#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
-#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_V13_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V13_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V13_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_V13_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_V13_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_V13_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
-#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
-#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
-#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
-#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
-#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
-#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
-#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
-#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
-#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
-#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
-#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
-#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
-#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
-#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
-#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
-#define HDMI_ACR_CON HDMI_CORE_BASE(0x0180)
-#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
-#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
-#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
-#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
-#define HDMI_AUI_CON HDMI_CORE_BASE(0x0360)
-#define HDMI_SPD_CON HDMI_CORE_BASE(0x0400)
+#define HDMI_V13_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V13_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V13_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_V13_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_V13_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_V13_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V13_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V13_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V13_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V13_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V13_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V13_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V13_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V13_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V13_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_V13_ACR_CON HDMI_CORE_BASE(0x0180)
+#define HDMI_V13_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_V13_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_V13_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_V13_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_V13_HPD_GEN HDMI_CORE_BASE(0x05C8)
+#define HDMI_V13_AUI_CON HDMI_CORE_BASE(0x0360)
+#define HDMI_V13_SPD_CON HDMI_CORE_BASE(0x0400)
/* Timing generator registers */
#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
@@ -130,6 +133,9 @@
/* HDMI_CON_0 */
#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_ASP_EN (1 << 2)
+#define HDMI_ASP_DIS (0 << 2)
+#define HDMI_ASP_MASK (1 << 2)
#define HDMI_EN (1 << 0)
/* HDMI_PHY_STATUS */
@@ -138,10 +144,418 @@
/* HDMI_MODE_SEL */
#define HDMI_MODE_HDMI_EN (1 << 1)
#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_DVI_MODE_EN (1)
+#define HDMI_DVI_MODE_DIS (0)
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
#define HDMI_TG_EN (1 << 0)
#define HDMI_FIELD_EN (1 << 1)
+
+/* HDMI Version 1.4 */
+/* Control registers */
+/* #define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000) */
+/* #define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004) */
+#define HDMI_HDCP_KEY_LOAD HDMI_CTRL_BASE(0x0008)
+/* #define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C) */
+#define HDMI_INTC_CON_1 HDMI_CTRL_BASE(0x0010)
+#define HDMI_INTC_FLAG_1 HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_STATUS_0 HDMI_CTRL_BASE(0x0020)
+#define HDMI_PHY_STATUS_CMU HDMI_CTRL_BASE(0x0024)
+#define HDMI_PHY_STATUS_PLL HDMI_CTRL_BASE(0x0028)
+#define HDMI_PHY_CON_0 HDMI_CTRL_BASE(0x0030)
+#define HDMI_HPD_CTRL HDMI_CTRL_BASE(0x0040)
+#define HDMI_HPD_ST HDMI_CTRL_BASE(0x0044)
+#define HDMI_HPD_TH_X HDMI_CTRL_BASE(0x0050)
+#define HDMI_AUDIO_CLKSEL HDMI_CTRL_BASE(0x0070)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0074)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0078)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+
+/* Video related registers */
+#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
+#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
+#define HDMI_CMAX HDMI_CORE_BASE(0x0068)
+#define HDMI_CMIN HDMI_CORE_BASE(0x006C)
+
+#define HDMI_V2_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V2_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V1_BLANK_0 HDMI_CORE_BASE(0x00B8)
+#define HDMI_V1_BLANK_1 HDMI_CORE_BASE(0x00BC)
+
+#define HDMI_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_LINE_0 HDMI_CORE_BASE(0x00C8)
+#define HDMI_H_LINE_1 HDMI_CORE_BASE(0x00CC)
+
+#define HDMI_HSYNC_POL HDMI_CORE_BASE(0x00E0)
+
+#define HDMI_V_BLANK_F0_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F0_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F1_0 HDMI_CORE_BASE(0x0118)
+#define HDMI_V_BLANK_F1_1 HDMI_CORE_BASE(0x011C)
+
+#define HDMI_H_SYNC_START_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_START_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_END_0 HDMI_CORE_BASE(0x0128)
+#define HDMI_H_SYNC_END_1 HDMI_CORE_BASE(0x012C)
+
+#define HDMI_V_SYNC_LINE_BEF_2_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_LINE_BEF_2_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_LINE_BEF_1_0 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_LINE_BEF_1_1 HDMI_CORE_BASE(0x013C)
+
+#define HDMI_V_SYNC_LINE_AFT_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_LINE_AFT_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_LINE_AFT_1_0 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_LINE_AFT_1_1 HDMI_CORE_BASE(0x014C)
+
+#define HDMI_V_SYNC_LINE_AFT_PXL_2_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_LINE_AFT_PXL_2_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_LINE_AFT_PXL_1_0 HDMI_CORE_BASE(0x0158)
+#define HDMI_V_SYNC_LINE_AFT_PXL_1_1 HDMI_CORE_BASE(0x015C)
+
+#define HDMI_V_BLANK_F2_0 HDMI_CORE_BASE(0x0160)
+#define HDMI_V_BLANK_F2_1 HDMI_CORE_BASE(0x0164)
+#define HDMI_V_BLANK_F3_0 HDMI_CORE_BASE(0x0168)
+#define HDMI_V_BLANK_F3_1 HDMI_CORE_BASE(0x016C)
+#define HDMI_V_BLANK_F4_0 HDMI_CORE_BASE(0x0170)
+#define HDMI_V_BLANK_F4_1 HDMI_CORE_BASE(0x0174)
+#define HDMI_V_BLANK_F5_0 HDMI_CORE_BASE(0x0178)
+#define HDMI_V_BLANK_F5_1 HDMI_CORE_BASE(0x017C)
+
+#define HDMI_V_SYNC_LINE_AFT_3_0 HDMI_CORE_BASE(0x0180)
+#define HDMI_V_SYNC_LINE_AFT_3_1 HDMI_CORE_BASE(0x0184)
+#define HDMI_V_SYNC_LINE_AFT_4_0 HDMI_CORE_BASE(0x0188)
+#define HDMI_V_SYNC_LINE_AFT_4_1 HDMI_CORE_BASE(0x018C)
+#define HDMI_V_SYNC_LINE_AFT_5_0 HDMI_CORE_BASE(0x0190)
+#define HDMI_V_SYNC_LINE_AFT_5_1 HDMI_CORE_BASE(0x0194)
+#define HDMI_V_SYNC_LINE_AFT_6_0 HDMI_CORE_BASE(0x0198)
+#define HDMI_V_SYNC_LINE_AFT_6_1 HDMI_CORE_BASE(0x019C)
+
+#define HDMI_V_SYNC_LINE_AFT_PXL_3_0 HDMI_CORE_BASE(0x01A0)
+#define HDMI_V_SYNC_LINE_AFT_PXL_3_1 HDMI_CORE_BASE(0x01A4)
+#define HDMI_V_SYNC_LINE_AFT_PXL_4_0 HDMI_CORE_BASE(0x01A8)
+#define HDMI_V_SYNC_LINE_AFT_PXL_4_1 HDMI_CORE_BASE(0x01AC)
+#define HDMI_V_SYNC_LINE_AFT_PXL_5_0 HDMI_CORE_BASE(0x01B0)
+#define HDMI_V_SYNC_LINE_AFT_PXL_5_1 HDMI_CORE_BASE(0x01B4)
+#define HDMI_V_SYNC_LINE_AFT_PXL_6_0 HDMI_CORE_BASE(0x01B8)
+#define HDMI_V_SYNC_LINE_AFT_PXL_6_1 HDMI_CORE_BASE(0x01BC)
+
+#define HDMI_VACT_SPACE_1_0 HDMI_CORE_BASE(0x01C0)
+#define HDMI_VACT_SPACE_1_1 HDMI_CORE_BASE(0x01C4)
+#define HDMI_VACT_SPACE_2_0 HDMI_CORE_BASE(0x01C8)
+#define HDMI_VACT_SPACE_2_1 HDMI_CORE_BASE(0x01CC)
+#define HDMI_VACT_SPACE_3_0 HDMI_CORE_BASE(0x01D0)
+#define HDMI_VACT_SPACE_3_1 HDMI_CORE_BASE(0x01D4)
+#define HDMI_VACT_SPACE_4_0 HDMI_CORE_BASE(0x01D8)
+#define HDMI_VACT_SPACE_4_1 HDMI_CORE_BASE(0x01DC)
+#define HDMI_VACT_SPACE_5_0 HDMI_CORE_BASE(0x01E0)
+#define HDMI_VACT_SPACE_5_1 HDMI_CORE_BASE(0x01E4)
+#define HDMI_VACT_SPACE_6_0 HDMI_CORE_BASE(0x01E8)
+#define HDMI_VACT_SPACE_6_1 HDMI_CORE_BASE(0x01EC)
+
+#define HDMI_GCP_CON HDMI_CORE_BASE(0x0200)
+#define HDMI_GCP_BYTE1 HDMI_CORE_BASE(0x0210)
+#define HDMI_GCP_BYTE2 HDMI_CORE_BASE(0x0214)
+#define HDMI_GCP_BYTE3 HDMI_CORE_BASE(0x0218)
+
+/* Audio related registers */
+#define HDMI_ASP_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_ASP_SP_FLAT HDMI_CORE_BASE(0x0304)
+#define HDMI_ASP_CHCFG0 HDMI_CORE_BASE(0x0310)
+#define HDMI_ASP_CHCFG1 HDMI_CORE_BASE(0x0314)
+#define HDMI_ASP_CHCFG2 HDMI_CORE_BASE(0x0318)
+#define HDMI_ASP_CHCFG3 HDMI_CORE_BASE(0x031C)
+
+#define HDMI_ACR_CON HDMI_CORE_BASE(0x0400)
+#define HDMI_ACR_MCTS0 HDMI_CORE_BASE(0x0410)
+#define HDMI_ACR_MCTS1 HDMI_CORE_BASE(0x0414)
+#define HDMI_ACR_MCTS2 HDMI_CORE_BASE(0x0418)
+#define HDMI_ACR_CTS0 HDMI_CORE_BASE(0x0420)
+#define HDMI_ACR_CTS1 HDMI_CORE_BASE(0x0424)
+#define HDMI_ACR_CTS2 HDMI_CORE_BASE(0x0428)
+#define HDMI_ACR_N0 HDMI_CORE_BASE(0x0430)
+#define HDMI_ACR_N1 HDMI_CORE_BASE(0x0434)
+#define HDMI_ACR_N2 HDMI_CORE_BASE(0x0438)
+
+/* Packet related registers */
+#define HDMI_ACP_CON HDMI_CORE_BASE(0x0500)
+#define HDMI_ACP_TYPE HDMI_CORE_BASE(0x0514)
+#define HDMI_ACP_DATA(n) HDMI_CORE_BASE(0x0520 + 4 * (n))
+
+#define HDMI_ISRC_CON HDMI_CORE_BASE(0x0600)
+#define HDMI_ISRC1_HEADER1 HDMI_CORE_BASE(0x0614)
+#define HDMI_ISRC1_DATA(n) HDMI_CORE_BASE(0x0620 + 4 * (n))
+#define HDMI_ISRC2_DATA(n) HDMI_CORE_BASE(0x06A0 + 4 * (n))
+
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0700)
+#define HDMI_AVI_HEADER0 HDMI_CORE_BASE(0x0710)
+#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
+#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
+#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
+
+#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
+#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
+#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
+#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
+#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
+#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
+
+#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
+#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
+#define HDMI_MPG_DATA(n) HDMI_CORE_BASE(0x0920 + 4 * (n))
+
+#define HDMI_SPD_CON HDMI_CORE_BASE(0x0A00)
+#define HDMI_SPD_HEADER0 HDMI_CORE_BASE(0x0A10)
+#define HDMI_SPD_HEADER1 HDMI_CORE_BASE(0x0A14)
+#define HDMI_SPD_HEADER2 HDMI_CORE_BASE(0x0A18)
+#define HDMI_SPD_DATA(n) HDMI_CORE_BASE(0x0A20 + 4 * (n))
+
+#define HDMI_GAMUT_CON HDMI_CORE_BASE(0x0B00)
+#define HDMI_GAMUT_HEADER0 HDMI_CORE_BASE(0x0B10)
+#define HDMI_GAMUT_HEADER1 HDMI_CORE_BASE(0x0B14)
+#define HDMI_GAMUT_HEADER2 HDMI_CORE_BASE(0x0B18)
+#define HDMI_GAMUT_METADATA(n) HDMI_CORE_BASE(0x0B20 + 4 * (n))
+
+#define HDMI_VSI_CON HDMI_CORE_BASE(0x0C00)
+#define HDMI_VSI_HEADER0 HDMI_CORE_BASE(0x0C10)
+#define HDMI_VSI_HEADER1 HDMI_CORE_BASE(0x0C14)
+#define HDMI_VSI_HEADER2 HDMI_CORE_BASE(0x0C18)
+#define HDMI_VSI_DATA(n) HDMI_CORE_BASE(0x0C20 + 4 * (n))
+
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x0D00)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x0D04)
+
+#define HDMI_AN_SEED_SEL HDMI_CORE_BASE(0x0E48)
+#define HDMI_AN_SEED_0 HDMI_CORE_BASE(0x0E58)
+#define HDMI_AN_SEED_1 HDMI_CORE_BASE(0x0E5C)
+#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
+#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+
+/* HDCP related registers */
+#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
+#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
+
+#define HDMI_HDCP_KSV_LIST_CON HDMI_CORE_BASE(0x7064)
+#define HDMI_HDCP_SHA_RESULT HDMI_CORE_BASE(0x7070)
+#define HDMI_HDCP_CTRL1 HDMI_CORE_BASE(0x7080)
+#define HDMI_HDCP_CTRL2 HDMI_CORE_BASE(0x7084)
+#define HDMI_HDCP_CHECK_RESULT HDMI_CORE_BASE(0x7090)
+#define HDMI_HDCP_BKSV(n) HDMI_CORE_BASE(0x70A0 + 4 * (n))
+#define HDMI_HDCP_AKSV(n) HDMI_CORE_BASE(0x70C0 + 4 * (n))
+#define HDMI_HDCP_AN(n) HDMI_CORE_BASE(0x70E0 + 4 * (n))
+
+#define HDMI_HDCP_BCAPS HDMI_CORE_BASE(0x7100)
+#define HDMI_HDCP_BSTATUS_0 HDMI_CORE_BASE(0x7110)
+#define HDMI_HDCP_BSTATUS_1 HDMI_CORE_BASE(0x7114)
+#define HDMI_HDCP_RI_0 HDMI_CORE_BASE(0x7140)
+#define HDMI_HDCP_RI_1 HDMI_CORE_BASE(0x7144)
+#define HDMI_HDCP_I2C_INT HDMI_CORE_BASE(0x7180)
+#define HDMI_HDCP_AN_INT HDMI_CORE_BASE(0x7190)
+#define HDMI_HDCP_WDT_INT HDMI_CORE_BASE(0x71A0)
+#define HDMI_HDCP_RI_INT HDMI_CORE_BASE(0x71B0)
+#define HDMI_HDCP_RI_COMPARE_0 HDMI_CORE_BASE(0x71D0)
+#define HDMI_HDCP_RI_COMPARE_1 HDMI_CORE_BASE(0x71D4)
+#define HDMI_HDCP_FRAME_COUNT HDMI_CORE_BASE(0x71E0)
+
+#define HDMI_RGB_ROUND_EN HDMI_CORE_BASE(0xD500)
+#define HDMI_VACT_SPACE_R_0 HDMI_CORE_BASE(0xD504)
+#define HDMI_VACT_SPACE_R_1 HDMI_CORE_BASE(0xD508)
+#define HDMI_VACT_SPACE_G_0 HDMI_CORE_BASE(0xD50C)
+#define HDMI_VACT_SPACE_G_1 HDMI_CORE_BASE(0xD510)
+#define HDMI_VACT_SPACE_B_0 HDMI_CORE_BASE(0xD514)
+#define HDMI_VACT_SPACE_B_1 HDMI_CORE_BASE(0xD518)
+
+#define HDMI_BLUE_SCREEN_B_0 HDMI_CORE_BASE(0xD520)
+#define HDMI_BLUE_SCREEN_B_1 HDMI_CORE_BASE(0xD524)
+#define HDMI_BLUE_SCREEN_G_0 HDMI_CORE_BASE(0xD528)
+#define HDMI_BLUE_SCREEN_G_1 HDMI_CORE_BASE(0xD52C)
+#define HDMI_BLUE_SCREEN_R_0 HDMI_CORE_BASE(0xD530)
+#define HDMI_BLUE_SCREEN_R_1 HDMI_CORE_BASE(0xD534)
+
+/* HDMI I2S register */
+#define HDMI_I2S_CLK_CON HDMI_I2S_BASE(0x000)
+#define HDMI_I2S_CON_1 HDMI_I2S_BASE(0x004)
+#define HDMI_I2S_CON_2 HDMI_I2S_BASE(0x008)
+#define HDMI_I2S_PIN_SEL_0 HDMI_I2S_BASE(0x00c)
+#define HDMI_I2S_PIN_SEL_1 HDMI_I2S_BASE(0x010)
+#define HDMI_I2S_PIN_SEL_2 HDMI_I2S_BASE(0x014)
+#define HDMI_I2S_PIN_SEL_3 HDMI_I2S_BASE(0x018)
+#define HDMI_I2S_DSD_CON HDMI_I2S_BASE(0x01c)
+#define HDMI_I2S_MUX_CON HDMI_I2S_BASE(0x020)
+#define HDMI_I2S_CH_ST_CON HDMI_I2S_BASE(0x024)
+#define HDMI_I2S_CH_ST_0 HDMI_I2S_BASE(0x028)
+#define HDMI_I2S_CH_ST_1 HDMI_I2S_BASE(0x02c)
+#define HDMI_I2S_CH_ST_2 HDMI_I2S_BASE(0x030)
+#define HDMI_I2S_CH_ST_3 HDMI_I2S_BASE(0x034)
+#define HDMI_I2S_CH_ST_4 HDMI_I2S_BASE(0x038)
+#define HDMI_I2S_CH_ST_SH_0 HDMI_I2S_BASE(0x03c)
+#define HDMI_I2S_CH_ST_SH_1 HDMI_I2S_BASE(0x040)
+#define HDMI_I2S_CH_ST_SH_2 HDMI_I2S_BASE(0x044)
+#define HDMI_I2S_CH_ST_SH_3 HDMI_I2S_BASE(0x048)
+#define HDMI_I2S_CH_ST_SH_4 HDMI_I2S_BASE(0x04c)
+#define HDMI_I2S_MUX_CH HDMI_I2S_BASE(0x054)
+#define HDMI_I2S_MUX_CUV HDMI_I2S_BASE(0x058)
+
+/* I2S bit definition */
+
+/* I2S_CLK_CON */
+#define HDMI_I2S_CLK_DIS (0)
+#define HDMI_I2S_CLK_EN (1)
+
+/* I2S_CON_1 */
+#define HDMI_I2S_SCLK_FALLING_EDGE (0 << 1)
+#define HDMI_I2S_SCLK_RISING_EDGE (1 << 1)
+#define HDMI_I2S_L_CH_LOW_POL (0)
+#define HDMI_I2S_L_CH_HIGH_POL (1)
+
+/* I2S_CON_2 */
+#define HDMI_I2S_MSB_FIRST_MODE (0 << 6)
+#define HDMI_I2S_LSB_FIRST_MODE (1 << 6)
+#define HDMI_I2S_BIT_CH_32FS (0 << 4)
+#define HDMI_I2S_BIT_CH_48FS (1 << 4)
+#define HDMI_I2S_BIT_CH_RESERVED (2 << 4)
+#define HDMI_I2S_SDATA_16BIT (1 << 2)
+#define HDMI_I2S_SDATA_20BIT (2 << 2)
+#define HDMI_I2S_SDATA_24BIT (3 << 2)
+#define HDMI_I2S_BASIC_FORMAT (0)
+#define HDMI_I2S_L_JUST_FORMAT (2)
+#define HDMI_I2S_R_JUST_FORMAT (3)
+#define HDMI_I2S_CON_2_CLR (~(0xFF))
+#define HDMI_I2S_SET_BIT_CH(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SET_SDATA_BIT(x) (((x) & 0x7) << 2)
+
+/* I2S_PIN_SEL_0 */
+#define HDMI_I2S_SEL_SCLK(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_LRCK(x) ((x) & 0x7)
+
+/* I2S_PIN_SEL_1 */
+#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+
+/* I2S_PIN_SEL_2 */
+#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+
+/* I2S_PIN_SEL_3 */
+#define HDMI_I2S_SEL_DSD(x) ((x) & 0x7)
+
+/* I2S_DSD_CON */
+#define HDMI_I2S_DSD_CLK_RI_EDGE (1 << 1)
+#define HDMI_I2S_DSD_CLK_FA_EDGE (0 << 1)
+#define HDMI_I2S_DSD_ENABLE (1)
+#define HDMI_I2S_DSD_DISABLE (0)
+
+/* I2S_MUX_CON */
+#define HDMI_I2S_NOISE_FILTER_ZERO (0 << 5)
+#define HDMI_I2S_NOISE_FILTER_2_STAGE (1 << 5)
+#define HDMI_I2S_NOISE_FILTER_3_STAGE (2 << 5)
+#define HDMI_I2S_NOISE_FILTER_4_STAGE (3 << 5)
+#define HDMI_I2S_NOISE_FILTER_5_STAGE (4 << 5)
+#define HDMI_I2S_IN_DISABLE (1 << 4)
+#define HDMI_I2S_IN_ENABLE (0 << 4)
+#define HDMI_I2S_AUD_SPDIF (0 << 2)
+#define HDMI_I2S_AUD_I2S (1 << 2)
+#define HDMI_I2S_AUD_DSD (2 << 2)
+#define HDMI_I2S_CUV_SPDIF_ENABLE (0 << 1)
+#define HDMI_I2S_CUV_I2S_ENABLE (1 << 1)
+#define HDMI_I2S_MUX_DISABLE (0)
+#define HDMI_I2S_MUX_ENABLE (1)
+#define HDMI_I2S_MUX_CON_CLR (~(0xFF))
+
+/* I2S_CH_ST_CON */
+#define HDMI_I2S_CH_STATUS_RELOAD (1)
+#define HDMI_I2S_CH_ST_CON_CLR (~(1))
+
+/* I2S_CH_ST_0 / I2S_CH_ST_SH_0 */
+#define HDMI_I2S_CH_STATUS_MODE_0 (0 << 6)
+#define HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH (0 << 3)
+#define HDMI_I2S_2AUD_CH_WITH_PREEMPH (1 << 3)
+#define HDMI_I2S_DEFAULT_EMPHASIS (0 << 3)
+#define HDMI_I2S_COPYRIGHT (0 << 2)
+#define HDMI_I2S_NO_COPYRIGHT (1 << 2)
+#define HDMI_I2S_LINEAR_PCM (0 << 1)
+#define HDMI_I2S_NO_LINEAR_PCM (1 << 1)
+#define HDMI_I2S_CONSUMER_FORMAT (0)
+#define HDMI_I2S_PROF_FORMAT (1)
+#define HDMI_I2S_CH_ST_0_CLR (~(0xFF))
+
+/* I2S_CH_ST_1 / I2S_CH_ST_SH_1 */
+#define HDMI_I2S_CD_PLAYER (0x00)
+#define HDMI_I2S_DAT_PLAYER (0x03)
+#define HDMI_I2S_DCC_PLAYER (0x43)
+#define HDMI_I2S_MINI_DISC_PLAYER (0x49)
+
+/* I2S_CH_ST_2 / I2S_CH_ST_SH_2 */
+#define HDMI_I2S_CHANNEL_NUM_MASK (0xF << 4)
+#define HDMI_I2S_SOURCE_NUM_MASK (0xF)
+#define HDMI_I2S_SET_CHANNEL_NUM(x) (((x) & (0xF)) << 4)
+#define HDMI_I2S_SET_SOURCE_NUM(x) ((x) & (0xF))
+
+/* I2S_CH_ST_3 / I2S_CH_ST_SH_3 */
+#define HDMI_I2S_CLK_ACCUR_LEVEL_1 (1 << 4)
+#define HDMI_I2S_CLK_ACCUR_LEVEL_2 (0 << 4)
+#define HDMI_I2S_CLK_ACCUR_LEVEL_3 (2 << 4)
+#define HDMI_I2S_SMP_FREQ_44_1 (0x0)
+#define HDMI_I2S_SMP_FREQ_48 (0x2)
+#define HDMI_I2S_SMP_FREQ_32 (0x3)
+#define HDMI_I2S_SMP_FREQ_96 (0xA)
+#define HDMI_I2S_SET_SMP_FREQ(x) ((x) & (0xF))
+
+/* I2S_CH_ST_4 / I2S_CH_ST_SH_4 */
+#define HDMI_I2S_ORG_SMP_FREQ_44_1 (0xF << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_88_2 (0x7 << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_22_05 (0xB << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_176_4 (0x3 << 4)
+#define HDMI_I2S_WORD_LEN_NOT_DEFINE (0x0 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_20BITS (0x1 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_22BITS (0x2 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_23BITS (0x4 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_24BITS (0x5 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_21BITS (0x6 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_16BITS (0x1 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_18BITS (0x2 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_19BITS (0x4 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_20BITS (0x5 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_17BITS (0x6 << 1)
+#define HDMI_I2S_WORD_LEN_MAX_24BITS (1)
+#define HDMI_I2S_WORD_LEN_MAX_20BITS (0)
+
+/* I2S_MUX_CH */
+#define HDMI_I2S_CH3_R_EN (1 << 7)
+#define HDMI_I2S_CH3_L_EN (1 << 6)
+#define HDMI_I2S_CH3_EN (3 << 6)
+#define HDMI_I2S_CH2_R_EN (1 << 5)
+#define HDMI_I2S_CH2_L_EN (1 << 4)
+#define HDMI_I2S_CH2_EN (3 << 4)
+#define HDMI_I2S_CH1_R_EN (1 << 3)
+#define HDMI_I2S_CH1_L_EN (1 << 2)
+#define HDMI_I2S_CH1_EN (3 << 2)
+#define HDMI_I2S_CH0_R_EN (1 << 1)
+#define HDMI_I2S_CH0_L_EN (1)
+#define HDMI_I2S_CH0_EN (3)
+#define HDMI_I2S_CH_ALL_EN (0xFF)
+#define HDMI_I2S_MUX_CH_CLR (~HDMI_I2S_CH_ALL_EN)
+
+/* I2S_MUX_CUV */
+#define HDMI_I2S_CUV_R_EN (1 << 1)
+#define HDMI_I2S_CUV_L_EN (1)
+#define HDMI_I2S_CUV_RL_EN (0x03)
+
+/* I2S_CUV_L_R */
+#define HDMI_I2S_CUV_R_DATA_MASK (0x7 << 4)
+#define HDMI_I2S_CUV_L_DATA_MASK (0x7)
+
+/* Timing generator registers */
+/* TG configure/status registers */
+#define HDMI_TG_VACT_ST3_L HDMI_TG_BASE(0x0068)
+#define HDMI_TG_VACT_ST3_H HDMI_TG_BASE(0x006c)
+#define HDMI_TG_VACT_ST4_L HDMI_TG_BASE(0x0070)
+#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074)
+#define HDMI_TG_3D HDMI_TG_BASE(0x00F0)
+
#endif /* SAMSUNG_REGS_HDMI_H */
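The new v1.4 block keeps this register map's convention of splitting multi-byte timing
values across consecutive byte-wide registers spaced 4 bytes apart (HDMI_V2_BLANK_0/_1,
HDMI_V_LINE_0/_1, and so on). A sketch of writing such a value low byte first, assuming
a plain writeb() accessor on the mapped register base; hdmi_write_field() is an
illustrative name, not a helper from the driver:

	/*
	 * Write 'bits' worth of 'value' into consecutive byte-wide registers,
	 * low byte first, using the 4-byte stride of this register map.
	 */
	static void hdmi_write_field(void __iomem *base, u32 reg, u32 value, int bits)
	{
		int i;

		for (i = 0; i < DIV_ROUND_UP(bits, 8); i++)
			writeb(value >> (8 * i), base + reg + 4 * i);
	}

	/* e.g. a two-byte vertical blank value into HDMI_V2_BLANK_0/_1 */
	hdmi_write_field(hdata->regs, HDMI_V2_BLANK_0, vblank, 16);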
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 754e14b..42e665c 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -16,8 +16,7 @@ config DRM_GMA600
depends on DRM_GMA500
help
Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
- platforms with LVDS ports. HDMI and MIPI are not currently
- supported.
+ platforms with LVDS ports. MIPI is not currently supported.
config DRM_GMA3600
bool "Intel GMA3600/3650 support (Experimental)"
@@ -25,3 +24,10 @@ config DRM_GMA3600
help
Say yes to include basic support for Intel GMA3600/3650 (Intel
Cedar Trail) platforms.
+
+config DRM_MEDFIELD
+ bool "Intel Medfield support (Experimental)"
+ depends on DRM_GMA500 && X86_INTEL_MID
+ help
+ Say yes to include support for the Intel Medfield platform.
+
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 81c103be..1583982 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -37,4 +37,14 @@ gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o
+gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
+ mdfld_output.o \
+ mdfld_intel_display.o \
+ mdfld_dsi_output.o \
+ mdfld_dsi_dpi.o \
+ mdfld_dsi_pkg_sender.o \
+ mdfld_tpo_vid.o \
+ mdfld_tmd_vid.o \
+ tc35876x-dsi-lvds.o
+
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 53404af..a54cc73 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -202,13 +202,12 @@ static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
pci_dev_put(pci_root);
}
-#define PSB_APM_CMD 0x0
-#define PSB_APM_STS 0x04
#define PSB_PM_SSC 0x20
#define PSB_PM_SSS 0x30
-#define PSB_PWRGT_GFX_MASK 0x3
-#define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c
-#define CDV_PWRGT_DISPLAY_STS 0x000fc00c
+#define PSB_PWRGT_GFX_ON 0x02
+#define PSB_PWRGT_GFX_OFF 0x01
+#define PSB_PWRGT_GFX_D0 0x00
+#define PSB_PWRGT_GFX_D3 0x03
static void cdv_init_pm(struct drm_device *dev)
{
@@ -221,26 +220,22 @@ static void cdv_init_pm(struct drm_device *dev)
dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
PSB_OSPMBA) & 0xFFFF;
- /* Force power on for now */
+ /* Power status */
pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
- pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ /* Enable the GPU */
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ pwr_cnt |= PSB_PWRGT_GFX_ON;
outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+ /* Wait for the GPU power */
for (i = 0; i < 5; i++) {
u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
- break;
- udelay(10);
- }
- pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
- pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
- outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
- for (i = 0; i < 5; i++) {
- u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
- if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
- break;
+ return;
udelay(10);
}
+ dev_err(dev->dev, "GPU: power management timed out.\n");
}
/**
@@ -249,11 +244,50 @@ static void cdv_init_pm(struct drm_device *dev)
*
* Save the state we need in order to be able to restore the interface
* upon resume from suspend
- *
- * FIXME: review
*/
static int cdv_save_display_registers(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
+ struct drm_connector *connector;
+
+ dev_info(dev->dev, "Saving GPU registers.\n");
+
+ pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+
+ regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
+ regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
+
+ regs->cdv.saveDSPARB = REG_READ(DSPARB);
+ regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1);
+ regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2);
+ regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3);
+ regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4);
+ regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5);
+ regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6);
+
+ regs->cdv.saveADPA = REG_READ(ADPA);
+
+ regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL);
+ regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+ regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2);
+ regs->cdv.saveLVDS = REG_READ(LVDS);
+
+ regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+
+ regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS);
+ regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS);
+ regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE);
+
+ regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL);
+
+ regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
+ regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
return 0;
}
@@ -267,16 +301,113 @@ static int cdv_save_display_registers(struct drm_device *dev)
*/
static int cdv_restore_display_registers(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
+ struct drm_connector *connector;
+ u32 temp;
+
+ pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+
+ REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
+ REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
+
+ /* BIOS does below anyway */
+ REG_WRITE(DPIO_CFG, 0);
+ REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+
+ temp = REG_READ(DPLL_A);
+ if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
+ REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(DPLL_A);
+ }
+
+ temp = REG_READ(DPLL_B);
+ if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
+ REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(DPLL_B);
+ }
+
+ udelay(500);
+
+ REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]);
+ REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]);
+ REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]);
+ REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]);
+ REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]);
+ REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]);
+
+ REG_WRITE(DSPARB, regs->cdv.saveDSPARB);
+ REG_WRITE(ADPA, regs->cdv.saveADPA);
+
+ REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2);
+ REG_WRITE(LVDS, regs->cdv.saveLVDS);
+ REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL);
+ REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS);
+ REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL);
+ REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS);
+ REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS);
+ REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE);
+ REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL);
+
+ REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL);
+
+ REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER);
+ REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
+
+ /* Fix arbitration bug */
+ CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+
+ drm_mode_config_reset(dev);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+ /* Resume the modeset for every activated CRTC */
+ drm_helper_resume_force_mode(dev);
return 0;
}
static int cdv_power_down(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt, pwr_mask, pwr_sts;
+ int tries = 5;
+
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ pwr_cnt |= PSB_PWRGT_GFX_OFF;
+ pwr_mask = PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+ while (tries--) {
+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D3)
+ return 0;
+ udelay(10);
+ }
return 0;
}
static int cdv_power_up(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt, pwr_mask, pwr_sts;
+ int tries = 5;
+
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+ pwr_cnt |= PSB_PWRGT_GFX_ON;
+ pwr_mask = PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+ while (tries--) {
+ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D0)
+ return 0;
+ udelay(10);
+ }
return 0;
}
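cdv_power_down() and cdv_power_up() above are the same write-then-poll sequence on the
APM command/status ports, differing only in the command written and the status value
waited for. A sketch of how the two paths could share one helper; cdv_gfx_set_power()
is a hypothetical name and, unlike the patch, it reports a timeout to the caller:

	static int cdv_gfx_set_power(struct drm_psb_private *dev_priv, u32 cmd, u32 done)
	{
		u32 pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
		int tries = 5;

		pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
		pwr_cnt |= cmd;
		outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);

		/* poll the status port until the island reports the requested state */
		while (tries--) {
			if ((inl(dev_priv->apm_base + PSB_APM_STS) &
			     PSB_PWRGT_GFX_MASK) == done)
				return 0;
			udelay(10);
		}
		return -EBUSY;
	}

	/* power up:   cdv_gfx_set_power(dev_priv, PSB_PWRGT_GFX_ON, PSB_PWRGT_GFX_D0);
	 * power down: cdv_gfx_set_power(dev_priv, PSB_PWRGT_GFX_OFF, PSB_PWRGT_GFX_D3); */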
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 2a88b7b..9561e17 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -26,7 +26,7 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-extern inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
/* FIXME: msleep ?? */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index c100f3e9..a71a6cd 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -32,6 +32,7 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "power.h"
+#include "cdv_device.h"
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 18d1152..be84559 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -344,7 +344,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
/*
* Returns whether any encoder on the specified pipe is of the specified type
*/
-bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -476,7 +476,7 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
return err != target;
}
-int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
@@ -569,7 +569,6 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
- bool enabled;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -663,7 +662,6 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
break;
}
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
/*Set FIFO Watermarks*/
REG_WRITE(DSPARB, 0x3F3E);
}
@@ -680,22 +678,6 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
-void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-void cdv_intel_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- /* lvds has its own version of commit see cdv_intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -745,7 +727,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
int refclk;
struct cdv_intel_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
- bool ok, is_sdvo = false, is_dvo = false;
+ bool ok;
bool is_crt = false, is_lvds = false, is_tv = false;
bool is_hdmi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -763,12 +745,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
- case INTEL_OUTPUT_SDVO:
- is_sdvo = true;
- break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
@@ -928,7 +904,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv =
@@ -968,7 +944,7 @@ void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->save_palette_a[i] =
+ dev_priv->regs.psb.save_palette_a[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -1338,18 +1314,20 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
gma_power_end(dev);
} else {
dpll = (pipe == 0) ?
- dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+ dev_priv->regs.psb.saveDPLL_A :
+ dev_priv->regs.psb.saveDPLL_B;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = (pipe == 0) ?
- dev_priv->saveFPA0 :
- dev_priv->saveFPB0;
+ dev_priv->regs.psb.saveFPA0 :
+ dev_priv->regs.psb.saveFPB0;
else
fp = (pipe == 0) ?
- dev_priv->saveFPA1 :
- dev_priv->saveFPB1;
+ dev_priv->regs.psb.saveFPA1 :
+ dev_priv->regs.psb.saveFPB1;
- is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ is_lvds = (pipe == 1) &&
+ (dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
}
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
@@ -1419,13 +1397,17 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
gma_power_end(dev);
} else {
htot = (pipe == 0) ?
- dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ dev_priv->regs.psb.saveHTOTAL_A :
+ dev_priv->regs.psb.saveHTOTAL_B;
hsync = (pipe == 0) ?
- dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ dev_priv->regs.psb.saveHSYNC_A :
+ dev_priv->regs.psb.saveHSYNC_B;
vtot = (pipe == 0) ?
- dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ dev_priv->regs.psb.saveVTOTAL_A :
+ dev_priv->regs.psb.saveVTOTAL_B;
vsync = (pipe == 0) ?
- dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ dev_priv->regs.psb.saveVSYNC_A :
+ dev_priv->regs.psb.saveVSYNC_B;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -1475,34 +1457,3 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.set_config = cdv_crtc_set_config,
.destroy = cdv_intel_crtc_destroy,
};
-
-/*
- * Set the default value of cursor control and base register
- * to zero. This is a workaround for h/w defect on oaktrail
- */
-void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
-{
- uint32_t control;
- uint32_t base;
-
- switch (pipe) {
- case 0:
- control = CURACNTR;
- base = CURABASE;
- break;
- case 1:
- control = CURBCNTR;
- base = CURBBASE;
- break;
- case 2:
- control = CURCCNTR;
- base = CURCBASE;
- break;
- default:
- return;
- }
-
- REG_WRITE(control, 0);
- REG_WRITE(base, 0);
-}
-
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index de25560..8d52695 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -34,6 +34,7 @@
#include "psb_intel_drv.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
+#include "cdv_device.h"
#include <linux/pm_runtime.h>
/* hdmi control bits */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 50e744b..8359c1a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -78,13 +78,14 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
gma_power_end(dev);
} else
- retval = ((dev_priv->saveBLC_PWM_CTL &
+ retval = ((dev_priv->regs.saveBLC_PWM_CTL &
BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
return retval;
}
+#if 0
/*
* Set LVDS backlight level by I2C command
*/
@@ -165,6 +166,7 @@ void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
else
cdv_lvds_pwm_set_brightness(dev, level);
}
+#endif
/**
* Sets the backlight level.
@@ -184,9 +186,9 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
gma_power_end(dev);
} else {
- blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
~BACKLIGHT_DUTY_CYCLE_MASK;
- dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
}
}
@@ -242,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
{
}
-int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -267,7 +269,7 @@ int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -436,7 +438,7 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
* Unregister the DDC bus for this connector then free the driver private
* structure.
*/
-void cdv_intel_lvds_destroy(struct drm_connector *connector)
+static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
struct psb_intel_encoder *psb_intel_encoder =
psb_intel_attached_encoder(connector);
@@ -448,7 +450,7 @@ void cdv_intel_lvds_destroy(struct drm_connector *connector)
kfree(connector);
}
-int cdv_intel_lvds_set_property(struct drm_connector *connector,
+static int cdv_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index be61673..8ea202f 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -111,39 +111,6 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-void psbfb_suspend(struct drm_device *dev)
-{
- struct drm_framebuffer *fb;
-
- console_lock();
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct fb_info *info = psbfb->fbdev;
- fb_set_suspend(info, 1);
- drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
- }
- mutex_unlock(&dev->mode_config.mutex);
- console_unlock();
-}
-
-void psbfb_resume(struct drm_device *dev)
-{
- struct drm_framebuffer *fb;
-
- console_lock();
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct fb_info *info = psbfb->fbdev;
- fb_set_suspend(info, 0);
- drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
- }
- mutex_unlock(&dev->mode_config.mutex);
- console_unlock();
- drm_helper_disable_unused_functions(dev);
-}
-
static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct psb_framebuffer *psbfb = vma->vm_private_data;
@@ -158,7 +125,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- address = (unsigned long)vmf->virtual_address;
+ address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -390,6 +357,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
bpp = sizes->surface_bpp;
+ depth = sizes->surface_depth;
/* No 24bit packed */
if (bpp == 24)
@@ -402,7 +370,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
* is ok with some fonts
*/
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
- depth = sizes->surface_depth;
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
@@ -462,6 +429,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
fbdev->psb_fb_helper.fb = fb;
fbdev->psb_fb_helper.fbdev = info;
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
strcpy(info->fix.id, "psbfb");
info->flags = FBINFO_DEFAULT;
@@ -499,18 +467,13 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
- info->pixmap.size = 64 * 1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
dev_info(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
@@ -559,11 +522,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
}
static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, int regno)
{
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
}
static int psbfb_probe(struct drm_fb_helper *helper,
@@ -588,7 +561,7 @@ struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.fb_probe = psbfb_probe,
};
-int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
struct fb_info *info;
struct psb_framebuffer *psbfb = &fbdev->pfb;
@@ -630,7 +603,7 @@ int psb_fbdev_init(struct drm_device *dev)
return 0;
}
-void psb_fbdev_fini(struct drm_device *dev)
+static void psb_fbdev_fini(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -724,10 +697,7 @@ static int psb_create_backlight_property(struct drm_device *dev)
if (dev_priv->backlight_property)
return 0;
- backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "backlight", 2);
- backlight->values[0] = 0;
- backlight->values[1] = 100;
+ backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
dev_priv->backlight_property = backlight;
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
index daac121..3c17634 100644
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ b/drivers/gpu/drm/gma500/gem_glue.c
@@ -19,6 +19,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
+#include "gem_glue.h"
void drm_gem_object_release_wrap(struct drm_gem_object *obj)
{
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index aff194f..c6465b4 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -57,7 +57,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
* Given a gtt_range object return the GTT offset of the page table
* entries for this gtt_range
*/
-u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long offset;
@@ -378,7 +378,7 @@ void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
kfree(gt);
}
-void psb_gtt_alloc(struct drm_device *dev)
+static void psb_gtt_alloc(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
init_rwsem(&dev_priv->gtt.sem);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 147584a..9db9052 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -395,7 +395,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
GFP_KERNEL);
if (dev_priv->gmbus == NULL)
return -ENOMEM;
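The gmbus change swaps kcalloc()'s first two arguments into the documented
(count, object size, flags) order. The allocation size is the same either way, but the
call now reads as "GMBUS_NUM_PORTS gmbus structures" and matches how the helper is
meant to be used. A sketch with a local pointer (not the patch's exact code):

	struct intel_gmbus *bus;

	/* GMBUS_NUM_PORTS zeroed elements, each sizeof(*bus) bytes */
	bus = kcalloc(GMBUS_NUM_PORTS, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;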
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
new file mode 100644
index 0000000..af65678
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -0,0 +1,691 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include "psb_drv.h"
+#include "mid_bios.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+#include "tc35876x-dsi-lvds.h"
+
+#include <asm/intel_scu_ipc.h>
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BRIGHTNESS_MIN_LEVEL 1
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+#define BLC_ADJUSTMENT_MAX 100
+
+#define MDFLD_BLC_PWM_PRECISION_FACTOR 10
+#define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static struct backlight_device *mdfld_backlight_device;
+
+int mdfld_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev =
+ (struct drm_device *)bl_get_data(mdfld_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+
+ DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
+
+ /* Perform value bounds checking */
+ if (level < BRIGHTNESS_MIN_LEVEL)
+ level = BRIGHTNESS_MIN_LEVEL;
+
+ if (gma_power_begin(dev, false)) {
+ u32 adjusted_level = 0;
+
+ /*
+ * Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2
+ */
+ adjusted_level = level * dev_priv->blc_adj2;
+ adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
+ dev_priv->brightness_adjusted = adjusted_level;
+
+ if (mdfld_get_panel_type(dev, 0) == TC35876X) {
+ if (dev_priv->dpi_panel_on[0] ||
+ dev_priv->dpi_panel_on[2])
+ tc35876x_brightness_control(dev,
+ dev_priv->brightness_adjusted);
+ } else {
+ if (dev_priv->dpi_panel_on[0])
+ mdfld_dsi_brightness_control(dev, 0,
+ dev_priv->brightness_adjusted);
+ }
+
+ if (dev_priv->dpi_panel_on[2])
+ mdfld_dsi_brightness_control(dev, 2,
+ dev_priv->brightness_adjusted);
+ gma_power_end(dev);
+ }
+
+ /* cache the brightness for later use */
+ dev_priv->brightness = level;
+ return 0;
+}
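
The adjustment above is plain integer percentage arithmetic: the requested level (1..BRIGHTNESS_MAX_LEVEL) is multiplied by blc_adj2 and divided by BLC_ADJUSTMENT_MAX (100), so blc_adj2 acts as a global dimming factor applied on top of the user's setting. A minimal standalone sketch of the same calculation; the function name below is illustrative and not part of the driver.

#include <stdio.h>

#define BLC_ADJUSTMENT_MAX 100

/* Mirrors the adjustment in mdfld_set_brightness(): 'level' is the
 * requested backlight level (1..100) and 'adj2' is the global dimming
 * percentage kept in dev_priv->blc_adj2. */
static unsigned int scale_brightness(unsigned int level, unsigned int adj2)
{
	return level * adj2 / BLC_ADJUSTMENT_MAX;
}

int main(void)
{
	printf("%u\n", scale_brightness(80, 100));	/* 80: no extra dimming */
	printf("%u\n", scale_brightness(80, 50));	/* 40: dimmed to half */
	return 0;
}
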
+
+static int mdfld_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev =
+ (struct drm_device *)bl_get_data(mdfld_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_DRIVER("brightness = 0x%x\n", dev_priv->brightness);
+
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ return dev_priv->brightness;
+}
+
+static const struct backlight_ops mdfld_ops = {
+ .get_brightness = mdfld_get_brightness,
+ .update_status = mdfld_set_brightness,
+};
+
+static int device_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
+ dev->dev_private;
+
+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+
+ return 0;
+}
+
+static int mdfld_backlight_init(struct drm_device *dev)
+{
+ struct backlight_properties props;
+ int ret = 0;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+ props.type = BACKLIGHT_PLATFORM;
+ mdfld_backlight_device = backlight_device_register("mdfld-bl",
+ NULL, (void *)dev, &mdfld_ops, &props);
+
+ if (IS_ERR(mdfld_backlight_device))
+ return PTR_ERR(mdfld_backlight_device);
+
+ ret = device_backlight_init(dev);
+ if (ret)
+ return ret;
+
+ mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
+ mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+ backlight_update_status(mdfld_backlight_device);
+ return 0;
+}
+#endif
+
+struct backlight_device *mdfld_get_backlight_device(void)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ return mdfld_backlight_device;
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * mdfld_save_display_registers
+ *
+ * Description: We are going to suspend so save current display
+ * register state.
+ *
+ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
+ */
+static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct medfield_state *regs = &dev_priv->regs.mdfld;
+ int i;
+
+ /* register */
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 fp_reg = MRST_FPA0;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 htot_reg = HTOTAL_A;
+ u32 hblank_reg = HBLANK_A;
+ u32 hsync_reg = HSYNC_A;
+ u32 vtot_reg = VTOTAL_A;
+ u32 vblank_reg = VBLANK_A;
+ u32 vsync_reg = VSYNC_A;
+ u32 pipesrc_reg = PIPEASRC;
+ u32 dspstride_reg = DSPASTRIDE;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dsptileoff_reg = DSPATILEOFF;
+ u32 dspsize_reg = DSPASIZE;
+ u32 dsppos_reg = DSPAPOS;
+ u32 dspsurf_reg = DSPASURF;
+ u32 mipi_reg = MIPI;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 dspstatus_reg = PIPEASTAT;
+ u32 palette_reg = PALETTE_A;
+
+ /* pointer to values */
+ u32 *dpll_val = &regs->saveDPLL_A;
+ u32 *fp_val = &regs->saveFPA0;
+ u32 *pipeconf_val = &regs->savePIPEACONF;
+ u32 *htot_val = &regs->saveHTOTAL_A;
+ u32 *hblank_val = &regs->saveHBLANK_A;
+ u32 *hsync_val = &regs->saveHSYNC_A;
+ u32 *vtot_val = &regs->saveVTOTAL_A;
+ u32 *vblank_val = &regs->saveVBLANK_A;
+ u32 *vsync_val = &regs->saveVSYNC_A;
+ u32 *pipesrc_val = &regs->savePIPEASRC;
+ u32 *dspstride_val = &regs->saveDSPASTRIDE;
+ u32 *dsplinoff_val = &regs->saveDSPALINOFF;
+ u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
+ u32 *dspsize_val = &regs->saveDSPASIZE;
+ u32 *dsppos_val = &regs->saveDSPAPOS;
+ u32 *dspsurf_val = &regs->saveDSPASURF;
+ u32 *mipi_val = &regs->saveMIPI;
+ u32 *dspcntr_val = &regs->saveDSPACNTR;
+ u32 *dspstatus_val = &regs->saveDSPASTATUS;
+ u32 *palette_val = regs->save_palette_a;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+		/* register */
+ dpll_reg = MDFLD_DPLL_B;
+ fp_reg = MDFLD_DPLL_DIV0;
+ pipeconf_reg = PIPEBCONF;
+ htot_reg = HTOTAL_B;
+ hblank_reg = HBLANK_B;
+ hsync_reg = HSYNC_B;
+ vtot_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ pipesrc_reg = PIPEBSRC;
+ dspstride_reg = DSPBSTRIDE;
+ dsplinoff_reg = DSPBLINOFF;
+ dsptileoff_reg = DSPBTILEOFF;
+ dspsize_reg = DSPBSIZE;
+ dsppos_reg = DSPBPOS;
+ dspsurf_reg = DSPBSURF;
+ dspcntr_reg = DSPBCNTR;
+ dspstatus_reg = PIPEBSTAT;
+ palette_reg = PALETTE_B;
+
+ /* values */
+ dpll_val = &regs->saveDPLL_B;
+ fp_val = &regs->saveFPB0;
+ pipeconf_val = &regs->savePIPEBCONF;
+ htot_val = &regs->saveHTOTAL_B;
+ hblank_val = &regs->saveHBLANK_B;
+ hsync_val = &regs->saveHSYNC_B;
+ vtot_val = &regs->saveVTOTAL_B;
+ vblank_val = &regs->saveVBLANK_B;
+ vsync_val = &regs->saveVSYNC_B;
+ pipesrc_val = &regs->savePIPEBSRC;
+ dspstride_val = &regs->saveDSPBSTRIDE;
+ dsplinoff_val = &regs->saveDSPBLINOFF;
+ dsptileoff_val = &regs->saveDSPBTILEOFF;
+ dspsize_val = &regs->saveDSPBSIZE;
+ dsppos_val = &regs->saveDSPBPOS;
+ dspsurf_val = &regs->saveDSPBSURF;
+ dspcntr_val = &regs->saveDSPBCNTR;
+ dspstatus_val = &regs->saveDSPBSTATUS;
+ palette_val = regs->save_palette_b;
+ break;
+ case 2:
+ /* register */
+ pipeconf_reg = PIPECCONF;
+ htot_reg = HTOTAL_C;
+ hblank_reg = HBLANK_C;
+ hsync_reg = HSYNC_C;
+ vtot_reg = VTOTAL_C;
+ vblank_reg = VBLANK_C;
+ vsync_reg = VSYNC_C;
+ pipesrc_reg = PIPECSRC;
+ dspstride_reg = DSPCSTRIDE;
+ dsplinoff_reg = DSPCLINOFF;
+ dsptileoff_reg = DSPCTILEOFF;
+ dspsize_reg = DSPCSIZE;
+ dsppos_reg = DSPCPOS;
+ dspsurf_reg = DSPCSURF;
+ mipi_reg = MIPI_C;
+ dspcntr_reg = DSPCCNTR;
+ dspstatus_reg = PIPECSTAT;
+ palette_reg = PALETTE_C;
+
+ /* pointer to values */
+ pipeconf_val = &regs->savePIPECCONF;
+ htot_val = &regs->saveHTOTAL_C;
+ hblank_val = &regs->saveHBLANK_C;
+ hsync_val = &regs->saveHSYNC_C;
+ vtot_val = &regs->saveVTOTAL_C;
+ vblank_val = &regs->saveVBLANK_C;
+ vsync_val = &regs->saveVSYNC_C;
+ pipesrc_val = &regs->savePIPECSRC;
+ dspstride_val = &regs->saveDSPCSTRIDE;
+ dsplinoff_val = &regs->saveDSPCLINOFF;
+ dsptileoff_val = &regs->saveDSPCTILEOFF;
+ dspsize_val = &regs->saveDSPCSIZE;
+ dsppos_val = &regs->saveDSPCPOS;
+ dspsurf_val = &regs->saveDSPCSURF;
+ mipi_val = &regs->saveMIPI_C;
+ dspcntr_val = &regs->saveDSPCCNTR;
+ dspstatus_val = &regs->saveDSPCSTATUS;
+ palette_val = regs->save_palette_c;
+ break;
+ default:
+ DRM_ERROR("%s, invalid pipe number.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Pipe & plane A info */
+ *dpll_val = PSB_RVDC32(dpll_reg);
+ *fp_val = PSB_RVDC32(fp_reg);
+ *pipeconf_val = PSB_RVDC32(pipeconf_reg);
+ *htot_val = PSB_RVDC32(htot_reg);
+ *hblank_val = PSB_RVDC32(hblank_reg);
+ *hsync_val = PSB_RVDC32(hsync_reg);
+ *vtot_val = PSB_RVDC32(vtot_reg);
+ *vblank_val = PSB_RVDC32(vblank_reg);
+ *vsync_val = PSB_RVDC32(vsync_reg);
+ *pipesrc_val = PSB_RVDC32(pipesrc_reg);
+ *dspstride_val = PSB_RVDC32(dspstride_reg);
+ *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
+ *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
+ *dspsize_val = PSB_RVDC32(dspsize_reg);
+ *dsppos_val = PSB_RVDC32(dsppos_reg);
+ *dspsurf_val = PSB_RVDC32(dspsurf_reg);
+ *dspcntr_val = PSB_RVDC32(dspcntr_reg);
+ *dspstatus_val = PSB_RVDC32(dspstatus_reg);
+
+ /*save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
+
+ if (pipe == 1) {
+ regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+
+ regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
+ regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
+ return 0;
+ }
+
+ *mipi_val = PSB_RVDC32(mipi_reg);
+ return 0;
+}
+
+/*
+ * mdfld_restore_display_registers
+ *
+ * Description: We are going to resume so restore display register state.
+ *
+ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
+ */
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+{
+ /* To get panel out of ULPS mode. */
+ u32 temp = 0;
+ u32 device_ready_reg = DEVICE_READY_REG;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dsi_config *dsi_config = NULL;
+ struct medfield_state *regs = &dev_priv->regs.mdfld;
+ u32 i = 0;
+ u32 dpll = 0;
+ u32 timeout = 0;
+
+	/* register */
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 fp_reg = MRST_FPA0;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 htot_reg = HTOTAL_A;
+ u32 hblank_reg = HBLANK_A;
+ u32 hsync_reg = HSYNC_A;
+ u32 vtot_reg = VTOTAL_A;
+ u32 vblank_reg = VBLANK_A;
+ u32 vsync_reg = VSYNC_A;
+ u32 pipesrc_reg = PIPEASRC;
+ u32 dspstride_reg = DSPASTRIDE;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dsptileoff_reg = DSPATILEOFF;
+ u32 dspsize_reg = DSPASIZE;
+ u32 dsppos_reg = DSPAPOS;
+ u32 dspsurf_reg = DSPASURF;
+ u32 dspstatus_reg = PIPEASTAT;
+ u32 mipi_reg = MIPI;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 palette_reg = PALETTE_A;
+
+ /* values */
+ u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
+ u32 fp_val = regs->saveFPA0;
+ u32 pipeconf_val = regs->savePIPEACONF;
+ u32 htot_val = regs->saveHTOTAL_A;
+ u32 hblank_val = regs->saveHBLANK_A;
+ u32 hsync_val = regs->saveHSYNC_A;
+ u32 vtot_val = regs->saveVTOTAL_A;
+ u32 vblank_val = regs->saveVBLANK_A;
+ u32 vsync_val = regs->saveVSYNC_A;
+ u32 pipesrc_val = regs->savePIPEASRC;
+ u32 dspstride_val = regs->saveDSPASTRIDE;
+ u32 dsplinoff_val = regs->saveDSPALINOFF;
+ u32 dsptileoff_val = regs->saveDSPATILEOFF;
+ u32 dspsize_val = regs->saveDSPASIZE;
+ u32 dsppos_val = regs->saveDSPAPOS;
+ u32 dspsurf_val = regs->saveDSPASURF;
+ u32 dspstatus_val = regs->saveDSPASTATUS;
+ u32 mipi_val = regs->saveMIPI;
+ u32 dspcntr_val = regs->saveDSPACNTR;
+ u32 *palette_val = regs->save_palette_a;
+
+ switch (pipe) {
+ case 0:
+ dsi_config = dev_priv->dsi_configs[0];
+ break;
+ case 1:
+		/* register */
+ dpll_reg = MDFLD_DPLL_B;
+ fp_reg = MDFLD_DPLL_DIV0;
+ pipeconf_reg = PIPEBCONF;
+ htot_reg = HTOTAL_B;
+ hblank_reg = HBLANK_B;
+ hsync_reg = HSYNC_B;
+ vtot_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ pipesrc_reg = PIPEBSRC;
+ dspstride_reg = DSPBSTRIDE;
+ dsplinoff_reg = DSPBLINOFF;
+ dsptileoff_reg = DSPBTILEOFF;
+ dspsize_reg = DSPBSIZE;
+ dsppos_reg = DSPBPOS;
+ dspsurf_reg = DSPBSURF;
+ dspcntr_reg = DSPBCNTR;
+ dspstatus_reg = PIPEBSTAT;
+ palette_reg = PALETTE_B;
+
+ /* values */
+ dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
+ fp_val = regs->saveFPB0;
+ pipeconf_val = regs->savePIPEBCONF;
+ htot_val = regs->saveHTOTAL_B;
+ hblank_val = regs->saveHBLANK_B;
+ hsync_val = regs->saveHSYNC_B;
+ vtot_val = regs->saveVTOTAL_B;
+ vblank_val = regs->saveVBLANK_B;
+ vsync_val = regs->saveVSYNC_B;
+ pipesrc_val = regs->savePIPEBSRC;
+ dspstride_val = regs->saveDSPBSTRIDE;
+ dsplinoff_val = regs->saveDSPBLINOFF;
+ dsptileoff_val = regs->saveDSPBTILEOFF;
+ dspsize_val = regs->saveDSPBSIZE;
+ dsppos_val = regs->saveDSPBPOS;
+ dspsurf_val = regs->saveDSPBSURF;
+ dspcntr_val = regs->saveDSPBCNTR;
+ dspstatus_val = regs->saveDSPBSTATUS;
+ palette_val = regs->save_palette_b;
+ break;
+ case 2:
+		/* register */
+ pipeconf_reg = PIPECCONF;
+ htot_reg = HTOTAL_C;
+ hblank_reg = HBLANK_C;
+ hsync_reg = HSYNC_C;
+ vtot_reg = VTOTAL_C;
+ vblank_reg = VBLANK_C;
+ vsync_reg = VSYNC_C;
+ pipesrc_reg = PIPECSRC;
+ dspstride_reg = DSPCSTRIDE;
+ dsplinoff_reg = DSPCLINOFF;
+ dsptileoff_reg = DSPCTILEOFF;
+ dspsize_reg = DSPCSIZE;
+ dsppos_reg = DSPCPOS;
+ dspsurf_reg = DSPCSURF;
+ mipi_reg = MIPI_C;
+ dspcntr_reg = DSPCCNTR;
+ dspstatus_reg = PIPECSTAT;
+ palette_reg = PALETTE_C;
+
+ /* values */
+ pipeconf_val = regs->savePIPECCONF;
+ htot_val = regs->saveHTOTAL_C;
+ hblank_val = regs->saveHBLANK_C;
+ hsync_val = regs->saveHSYNC_C;
+ vtot_val = regs->saveVTOTAL_C;
+ vblank_val = regs->saveVBLANK_C;
+ vsync_val = regs->saveVSYNC_C;
+ pipesrc_val = regs->savePIPECSRC;
+ dspstride_val = regs->saveDSPCSTRIDE;
+ dsplinoff_val = regs->saveDSPCLINOFF;
+ dsptileoff_val = regs->saveDSPCTILEOFF;
+ dspsize_val = regs->saveDSPCSIZE;
+ dsppos_val = regs->saveDSPCPOS;
+ dspsurf_val = regs->saveDSPCSURF;
+ mipi_val = regs->saveMIPI_C;
+ dspcntr_val = regs->saveDSPCCNTR;
+ dspstatus_val = regs->saveDSPCSTATUS;
+ palette_val = regs->save_palette_c;
+
+ dsi_config = dev_priv->dsi_configs[1];
+ break;
+ default:
+ DRM_ERROR("%s, invalid pipe number.\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Make sure the VGA plane is off; it defaults to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ if (pipe == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
+ PSB_RVDC32(dpll_reg);
+
+ PSB_WVDC32(fp_val, fp_reg);
+ } else {
+
+ dpll = PSB_RVDC32(dpll_reg);
+
+ if (!(dpll & DPLL_VCO_ENABLE)) {
+
+			/* When ungating power of the DPLL, wait 0.5us
+			   before enabling the VCO */
+ if (dpll & MDFLD_PWR_GATE_EN) {
+ dpll &= ~MDFLD_PWR_GATE_EN;
+ PSB_WVDC32(dpll, dpll_reg);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ PSB_WVDC32(fp_val, fp_reg);
+ PSB_WVDC32(dpll_val, dpll_reg);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ dpll_val |= DPLL_VCO_ENABLE;
+ PSB_WVDC32(dpll_val, dpll_reg);
+ PSB_RVDC32(dpll_reg);
+
+ /* wait for DSI PLL to lock */
+ while (timeout < 20000 &&
+ !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+
+ if (timeout == 20000) {
+ DRM_ERROR("%s, can't lock DSIPLL.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ }
+ /* Restore mode */
+ PSB_WVDC32(htot_val, htot_reg);
+ PSB_WVDC32(hblank_val, hblank_reg);
+ PSB_WVDC32(hsync_val, hsync_reg);
+ PSB_WVDC32(vtot_val, vtot_reg);
+ PSB_WVDC32(vblank_val, vblank_reg);
+ PSB_WVDC32(vsync_val, vsync_reg);
+ PSB_WVDC32(pipesrc_val, pipesrc_reg);
+ PSB_WVDC32(dspstatus_val, dspstatus_reg);
+
+ /*set up the plane*/
+ PSB_WVDC32(dspstride_val, dspstride_reg);
+ PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
+ PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
+ PSB_WVDC32(dspsize_val, dspsize_reg);
+ PSB_WVDC32(dsppos_val, dsppos_reg);
+ PSB_WVDC32(dspsurf_val, dspsurf_reg);
+
+ if (pipe == 1) {
+ /* restore palette (gamma) */
+ /*DRM_UDELAY(50000); */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+
+ PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+
+ /*TODO: resume HDMI port */
+
+ /*TODO: resume pipe*/
+
+ /*enable the plane*/
+ PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
+
+ return 0;
+ }
+
+ /*set up pipe related registers*/
+ PSB_WVDC32(mipi_val, mipi_reg);
+
+ /*setup MIPI adapter + MIPI IP registers*/
+ if (dsi_config)
+ mdfld_dsi_controller_init(dsi_config, pipe);
+
+ if (in_atomic() || in_interrupt())
+ mdelay(20);
+ else
+ msleep(20);
+
+ /*enable the plane*/
+ PSB_WVDC32(dspcntr_val, dspcntr_reg);
+
+ if (in_atomic() || in_interrupt())
+ mdelay(20);
+ else
+ msleep(20);
+
+ /* LP Hold Release */
+ temp = REG_READ(mipi_reg);
+ temp |= LP_OUTPUT_HOLD_RELEASE;
+ REG_WRITE(mipi_reg, temp);
+ mdelay(1);
+
+	/* Set DSI host to exit from Ultra Low Power State */
+ temp = REG_READ(device_ready_reg);
+ temp &= ~ULPS_MASK;
+ temp |= 0x3;
+ temp |= EXIT_ULPS_DEV_READY;
+ REG_WRITE(device_ready_reg, temp);
+ mdelay(1);
+
+ temp = REG_READ(device_ready_reg);
+ temp &= ~ULPS_MASK;
+ temp |= EXITING_ULPS;
+ REG_WRITE(device_ready_reg, temp);
+ mdelay(1);
+
+ /*enable the pipe*/
+ PSB_WVDC32(pipeconf_val, pipeconf_reg);
+
+ /* restore palette (gamma) */
+ /*DRM_UDELAY(50000); */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+
+ return 0;
+}
+
+static int mdfld_save_registers(struct drm_device *dev)
+{
+ /* mdfld_save_cursor_overlay_registers(dev); */
+ mdfld_save_display_registers(dev, 0);
+ mdfld_save_display_registers(dev, 2);
+ mdfld_disable_crtc(dev, 0);
+ mdfld_disable_crtc(dev, 2);
+
+ return 0;
+}
+
+static int mdfld_restore_registers(struct drm_device *dev)
+{
+ mdfld_restore_display_registers(dev, 2);
+ mdfld_restore_display_registers(dev, 0);
+ /* mdfld_restore_cursor_overlay_registers(dev); */
+
+ return 0;
+}
+
+static int mdfld_power_down(struct drm_device *dev)
+{
+ /* FIXME */
+ return 0;
+}
+
+static int mdfld_power_up(struct drm_device *dev)
+{
+ /* FIXME */
+ return 0;
+}
+
+const struct psb_ops mdfld_chip_ops = {
+ .name = "mdfld",
+ .accel_2d = 0,
+ .pipes = 3,
+ .crtcs = 3,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = mid_chip_setup,
+ .crtc_helper = &mdfld_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = mdfld_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = mdfld_backlight_init,
+#endif
+
+ .save_regs = mdfld_save_registers,
+ .restore_regs = mdfld_restore_registers,
+ .power_down = mdfld_power_down,
+ .power_up = mdfld_power_up,
+};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
new file mode 100644
index 0000000..d52358b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -0,0 +1,1017 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "psb_drv.h"
+#include "tc35876x-dsi-lvds.h"
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
+ int pipe);
+
+static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) &&
+ (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
+ & DSI_FIFO_GEN_HS_CTRL_FULL)) {
+ udelay(100);
+ timeout++;
+ }
+ if (timeout == 20000)
+ DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
+ DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
+}
+
+static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
+{
+ u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
+ int timeout = 0;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
+ & DSI_INTR_STATE_SPL_PKG_SENT))) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
+}
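
The four wait helpers above share one shape: poll a status register every 100us, give up after 20000 iterations (roughly 2 seconds), and log on timeout. A minimal standalone sketch of that polling pattern; read_reg() stands in for the driver's REG_READ() and the names here are illustrative, not part of the driver.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int (*read_reg_t)(unsigned int reg);

/* Poll 'reg' until (value & mask) == want, or the ~2 second budget of
 * 20000 iterations runs out; the caller decides what to log on timeout. */
static bool poll_reg(read_reg_t read_reg, unsigned int reg,
		     unsigned int mask, unsigned int want)
{
	int timeout;

	for (timeout = 0; timeout < 20000; timeout++) {
		if ((read_reg(reg) & mask) == want)
			return true;
		/* the driver delays 100us here via udelay(100) */
	}
	return false;
}

static unsigned int fake_reg_read(unsigned int reg)
{
	(void)reg;
	return 0x1;	/* pretend the status bit is already set */
}

int main(void)
{
	printf("ready: %d\n", poll_reg(fake_reg_read, 0, 0x1, 0x1));
	return 0;
}
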
+
+/* For TC35876X */
+
+static void dsi_set_device_ready_state(struct drm_device *dev, int state,
+ int pipe)
+{
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
+}
+
+static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
+ int state, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+
+ u32 dspcntr = dev_priv->dspcntr[pipe];
+ u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ if (pipe) {
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ } else
+ mipi &= (~0x03);
+
+ if (state) {
+ /*Set up pipe */
+ REG_WRITE(pipeconf_reg, BIT(31));
+
+ if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
+ dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
+ __func__);
+
+ /*Set up display plane */
+ REG_WRITE(dspcntr_reg, dspcntr);
+ } else {
+ u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
+
+ /* Put DSI lanes to ULPS to disable pipe */
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
+ REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
+
+ /* LP Hold */
+ REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
+ REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
+
+ /* Disable display plane */
+ REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
+
+ /* Flush the plane changes ??? posted write? */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+
+ /* Disable PIPE */
+ REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
+
+ if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
+ dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
+ __func__);
+
+ if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
+ dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
+ __func__);
+ }
+}
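
REG_FLD_MOD() is defined elsewhere in the driver; from its uses above it appears to perform a read-modify-write that replaces the bit field [high:low] of a register with a new value (for example, writing 2 into bits [2:1] of the device-ready register puts the DSI lanes into ULPS). The sketch below shows that arithmetic on a plain integer; the helper name and the assumed argument order are illustrative, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Replace bits [high:low] of 'reg_val' with 'val'. The real macro also
 * reads and writes the hardware register; here we work on a plain value
 * so the arithmetic is easy to check. */
static uint32_t fld_mod(uint32_t reg_val, uint32_t val, int high, int low)
{
	uint32_t mask = ((1u << (high - low + 1)) - 1) << low;

	return (reg_val & ~mask) | ((val << low) & mask);
}

int main(void)
{
	/* Write 2 into bits [2:1], as in
	 * REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1). */
	printf("0x%08x\n", (unsigned int)fld_mod(0x00000001, 2, 2, 1));	/* 0x00000005 */
	/* Clear bit 31, as in REG_FLD_MOD(dspcntr_reg, 0, 31, 31). */
	printf("0x%08x\n", (unsigned int)fld_mod(0x80000000u, 0, 31, 31));	/* 0x00000000 */
	return 0;
}
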
+
+static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
+ int pipe)
+{
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->dpi_panel_on[pipe]) {
+ dev_err(dev->dev, "DPI panel is already off\n");
+ return;
+ }
+ tc35876x_toshiba_bridge_panel_off(dev);
+ tc35876x_set_bridge_reset_state(dev, 1);
+ dsi_set_pipe_plane_enable_state(dev, 0, pipe);
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ dsi_set_device_ready_state(dev, 0, pipe);
+}
+
+static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
+ int pipe)
+{
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->dpi_panel_on[pipe]) {
+ dev_err(dev->dev, "DPI panel is already on\n");
+ return;
+ }
+
+ /* For resume path sequence */
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ dsi_set_device_ready_state(dev, 0, pipe);
+
+ dsi_set_device_ready_state(dev, 1, pipe);
+ tc35876x_set_bridge_reset_state(dev, 0);
+ tc35876x_configure_lvds_bridge(dev);
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */
+ dsi_set_pipe_plane_enable_state(dev, 1, pipe);
+}
+/* End for TC35876X */
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_dsi_tpo_ic_init
+ *
+ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
+ * restore_display_registers. since this function does not
+ * acquire the mutex, it is important that the calling function
+ * does!
+\* ************************************************************************* */
+static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ u32 dcsChannelNumber = dsi_config->channel_num;
+ u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
+ u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
+ u32 gen_ctrl_val = GEN_LONG_WRITE;
+
+ DRM_INFO("Enter mrst init TPO MIPI display.\n");
+
+ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
+
+ /* Flip page order */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00008036);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+ /* 0xF0 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5af0);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* Write protection key */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5af1);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xFC */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5afc);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xB7 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x770000b7);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000044);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
+
+ /* 0xB6 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000a0ab6);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xF2 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x081010f2);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x4a070708);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000000c5);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xF8 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x024003f8);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x01030a04);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x0e020220);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000004);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
+
+ /* 0xE2 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x398fc3e2);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x0000916f);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
+
+ /* 0xB0 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000000b0);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+ /* 0xF4 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x240242f4);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x78ee2002);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2a071050);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x507fee10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x10300710);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
+
+ /* 0xBA */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x19fe07ba);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x101c0a31);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000010);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xBB */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x28ff07bb);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x24280a31);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000034);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xFB */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535d05fb);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1b1a2130);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x221e180e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x131d2120);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535d0508);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1c1a2131);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x231f160d);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x111b2220);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535c2008);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1f1d2433);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2c251a10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2c34372d);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000023);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+ /* 0xFA */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x525c0bfa);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1c1c232f);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2623190e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x18212625);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x545d0d0e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1e1d2333);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x26231a10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1a222725);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x545d280f);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x21202635);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x31292013);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x31393d33);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000029);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+ /* Set DM */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000100f7);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+}
+
+static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
+ int num_lane, int bpp)
+{
+ return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
+}
+
+/*
+ * Calculate the DPI timing based on the given drm mode @mode.
+ * Returns 0 on success.
+ * FIXME: this uses the proposed mode values for the calculation; it may
+ * need to use the crtc mode values later.
+ */
+int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+ struct mdfld_dsi_dpi_timing *dpi_timing,
+ int num_lane, int bpp)
+{
+ int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
+ int pclk_vsync, pclk_vfp, pclk_vbp;
+
+ pclk_hactive = mode->hdisplay;
+ pclk_hfp = mode->hsync_start - mode->hdisplay;
+ pclk_hsync = mode->hsync_end - mode->hsync_start;
+ pclk_hbp = mode->htotal - mode->hsync_end;
+
+ pclk_vfp = mode->vsync_start - mode->vdisplay;
+ pclk_vsync = mode->vsync_end - mode->vsync_start;
+ pclk_vbp = mode->vtotal - mode->vsync_end;
+
+ /*
+	 * byte clock counts are calculated using the following formula:
+ * bclock_count = pclk_count * bpp / num_lane / 8
+ */
+ dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hsync, num_lane, bpp);
+ dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hbp, num_lane, bpp);
+ dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hfp, num_lane, bpp);
+ dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_hactive, num_lane, bpp);
+ dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_vsync, num_lane, bpp);
+ dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_vbp, num_lane, bpp);
+ dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
+ pclk_vfp, num_lane, bpp);
+
+ return 0;
+}
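
The conversion above maps intervals measured in pixel clocks onto DSI byte clocks with bclock = pclk * bpp / (num_lane * 8), matching mdfld_dsi_dpi_to_byte_clock_count(). A standalone sketch with a worked example; the lane count, bpp, and interval values below are illustrative assumptions, not values taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as mdfld_dsi_dpi_to_byte_clock_count(): an interval
 * measured in pixel clocks becomes a count of DSI byte clocks. */
static uint16_t to_byte_clocks(int pixel_clocks, int num_lane, int bpp)
{
	return (uint16_t)((pixel_clocks * bpp) / (num_lane * 8));
}

int main(void)
{
	/* Illustrative values: RGB888 (24 bpp) over 2 lanes.
	 * hsync of 10 pixel clocks -> 10 * 24 / 16 = 15 byte clocks. */
	printf("hsync   = %u\n", (unsigned)to_byte_clocks(10, 2, 24));
	/* hactive of 864 pixels -> 864 * 24 / 16 = 1296 byte clocks. */
	printf("hactive = %u\n", (unsigned)to_byte_clocks(864, 2, 24));
	return 0;
}

With 24 bpp over 2 lanes each pixel carries 3 bytes split across 16 bits of lane bandwidth per byte clock, hence the factor of 24/16 = 1.5 byte clocks per pixel clock in the example.
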
+
+void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ int lane_count = dsi_config->lane_count;
+ struct mdfld_dsi_dpi_timing dpi_timing;
+ struct drm_display_mode *mode = dsi_config->mode;
+ u32 val;
+
+ /*un-ready device*/
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
+
+ /*init dsi adapter before kicking off*/
+ REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
+
+ /*enable all interrupts*/
+ REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
+
+ /*set up func_prg*/
+ val = lane_count;
+ val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+
+ switch (dsi_config->bpp) {
+ case 16:
+ val |= DSI_DPI_COLOR_FORMAT_RGB565;
+ break;
+ case 18:
+ val |= DSI_DPI_COLOR_FORMAT_RGB666;
+ break;
+ case 24:
+ val |= DSI_DPI_COLOR_FORMAT_RGB888;
+ break;
+ default:
+ DRM_ERROR("unsupported color format, bpp = %d\n",
+ dsi_config->bpp);
+ }
+ REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
+
+ REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
+ (mode->vtotal * mode->htotal * dsi_config->bpp /
+ (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+ REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
+ 0xffff & DSI_LP_RX_TIMEOUT_MASK);
+
+ /*max value: 20 clock cycles of txclkesc*/
+ REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
+ 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+
+ /*min 21 txclkesc, max: ffffh*/
+ REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
+ 0xffff & DSI_RESET_TIMER_MASK);
+
+ REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
+ mode->vdisplay << 16 | mode->hdisplay);
+
+ /*set DPI timing registers*/
+ mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
+ dsi_config->lane_count, dsi_config->bpp);
+
+ REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
+ dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
+ dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
+ dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
+ dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
+ dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
+ dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
+ dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+
+ REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
+
+ /*min: 7d0 max: 4e20*/
+ REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
+
+ /*set up video mode*/
+ val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+ REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
+
+ REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
+
+ REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
+
+	/*TODO: figure out how to set up these registers*/
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
+ else
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
+
+ REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
+
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
+
+ /*set device ready*/
+ REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
+}
+
+void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
+{
+ struct drm_device *dev = output->dev;
+
+ /* clear special packet sent bit */
+ if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+ REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+ DSI_INTR_STATE_SPL_PKG_SENT);
+
+ /*send turn on package*/
+ REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
+
+ /*wait for SPL_PKG_SENT interrupt*/
+ mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
+
+ if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+ REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+ DSI_INTR_STATE_SPL_PKG_SENT);
+
+ output->panel_on = 1;
+
+	/* FIXME the following is disabled to work around the X slow start
+	   issue for the TMD panel
+ if (pipe == 2)
+ dev_priv->dpi_panel_on2 = true;
+ else if (pipe == 0)
+ dev_priv->dpi_panel_on = true; */
+}
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
+ int pipe)
+{
+ struct drm_device *dev = output->dev;
+
+	/* If the output is already off, or mode setting never happened, ignore this */
+ if ((!output->panel_on) || output->first_boot) {
+ output->first_boot = 0;
+ return;
+ }
+
+ /* Wait for dpi fifo to empty */
+ mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
+
+ /* Clear the special packet interrupt bit if set */
+ if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+ REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+ DSI_INTR_STATE_SPL_PKG_SENT);
+
+ if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
+ goto shutdown_out;
+
+ REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
+
+shutdown_out:
+ output->panel_on = 0;
+ output->first_boot = 0;
+
+	/* FIXME the following is disabled to work around the X slow start
+	   issue for the TMD panel
+ if (pipe == 2)
+ dev_priv->dpi_panel_on2 = false;
+ else if (pipe == 0)
+ dev_priv->dpi_panel_on = false; */
+}
+
+static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*start up display island if it was shutdown*/
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ mdfld_dsi_configure_up(dsi_encoder, pipe);
+ else {
+ /*enable mipi port*/
+ REG_WRITE(MIPI_PORT_CONTROL(pipe),
+ REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+ }
+ dev_priv->dpi_panel_on[pipe] = true;
+ } else {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ mdfld_dsi_configure_down(dsi_encoder, pipe);
+ else {
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+
+ /*disable mipi port*/
+ REG_WRITE(MIPI_PORT_CONTROL(pipe),
+ REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+ }
+ dev_priv->dpi_panel_on[pipe] = false;
+ }
+ gma_power_end(dev);
+}
+
+void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
+{
+ mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+ if (fixed_mode) {
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ }
+ return true;
+}
+
+void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
+{
+ mdfld_dsi_dpi_set_power(encoder, false);
+}
+
+void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
+{
+ mdfld_dsi_dpi_set_power(encoder, true);
+}
+
+/* For TC35876X */
+/* This functionality was implemented in firmware in iCDK but was removed
+ * in DV0 and later, so it needs to be done here. */
+static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+
+ REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
+ REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
+ REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
+ REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
+ REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
+ REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
+ REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
+ REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
+ REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
+ REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
+ REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
+ REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
+}
+
+static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ struct mdfld_dsi_dpi_timing dpi_timing;
+ struct drm_display_mode *mode = dsi_config->mode;
+
+ mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
+ dsi_config->lane_count,
+ dsi_config->bpp);
+
+ REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
+ mode->vdisplay << 16 | mode->hdisplay);
+ REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
+ dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
+ dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
+ dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
+ dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
+ dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
+ dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
+ dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+}
+
+static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ int lane_count = dsi_config->lane_count;
+
+ if (pipe) {
+ REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
+ REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
+ } else {
+ REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
+ REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
+ }
+
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
+ REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
+
+ /* lane_count = 3 */
+ REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
+
+ mdfld_mipi_set_video_timing(dsi_config, pipe);
+}
+
+static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_display_mode *mode = dsi_config->mode;
+
+ REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(HSYNC_A,
+ ((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
+
+ REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(VSYNC_A,
+ ((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
+
+ REG_WRITE(PIPEASRC,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
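
Each pipe timing register written above packs two zero-based fields into one 32-bit word: the larger value (total, or sync end) minus one goes into bits 31:16 and the smaller value (active, or sync start) minus one into bits 15:0. A small sketch of that packing; the helper name and the mode numbers are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Pack (high_val - 1) into bits 31:16 and (low_val - 1) into bits 15:0,
 * as done for HTOTAL_A, HBLANK_A, HSYNC_A and their vertical twins. */
static uint32_t pack_timing(int high_val, int low_val)
{
	return ((uint32_t)(high_val - 1) << 16) | (uint32_t)(low_val - 1);
}

int main(void)
{
	/* Illustrative mode values, not taken from this patch:
	 * htotal = 1000, hdisplay = 864. */
	printf("HTOTAL_A = 0x%08x\n", (unsigned int)pack_timing(1000, 864));
	return 0;
}
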
+/* End for TC35876X */
+
+void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ struct mdfld_dsi_dpi_output *dpi_output =
+ MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+
+ u32 pipeconf = dev_priv->pipeconf[pipe];
+ u32 dspcntr = dev_priv->dspcntr[pipe];
+ u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ if (pipe) {
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ } else {
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ mipi &= (~0x03); /* Use all four lanes */
+ else
+ mipi |= 2;
+ }
+
+ /*start up display island if it was shutdown*/
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+ /*
+		 * The following sequence is required to reset and configure
+		 * the bridge; it also starts the DSI clock at 200MHz.
+ */
+ tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
+ tc35876x_toshiba_bridge_panel_on(dev);
+ udelay(100);
+ /* Now start the DSI clock */
+ REG_WRITE(MRST_DPLL_A, 0x00);
+ REG_WRITE(MRST_FPA0, 0xC1);
+ REG_WRITE(MRST_DPLL_A, 0x00800000);
+ udelay(500);
+ REG_WRITE(MRST_DPLL_A, 0x80800000);
+
+ if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
+ dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
+ __func__);
+
+ REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
+
+ mipi_set_properties(dsi_config, pipe);
+ mdfld_mipi_config(dsi_config, pipe);
+ mdfld_set_pipe_timing(dsi_config, pipe);
+
+ REG_WRITE(DSPABASE, 0x00);
+ REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
+ REG_WRITE(DSPASIZE,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+
+ REG_WRITE(DSPACNTR, 0x98000000);
+ REG_WRITE(DSPASURF, 0x00);
+
+ REG_WRITE(VGACNTRL, 0x80000000);
+ REG_WRITE(DEVICE_READY_REG, 0x00000001);
+
+ REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
+ } else {
+ /*set up mipi port FIXME: do at init time */
+ REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
+ }
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ /* NOP */
+ } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+ /* set up DSI controller DPI interface */
+ mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+
+ /* Configure MIPI Bridge and Panel */
+ tc35876x_configure_lvds_bridge(dev);
+ dev_priv->dpi_panel_on[pipe] = true;
+ } else {
+ /*turn on DPI interface*/
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ }
+
+ /*set up pipe*/
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ /*set up display plane*/
+ REG_WRITE(dspcntr_reg, dspcntr);
+ REG_READ(dspcntr_reg);
+
+ msleep(20); /* FIXME: this should wait for vblank */
+
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ /* NOP */
+ } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ } else {
+ /* init driver ic */
+ mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+ /*init backlight*/
+ mdfld_dsi_brightness_init(dsi_config, pipe);
+ }
+
+ gma_power_end(dev);
+}
+
+/*
+ * Init DSI DPI encoder.
+ * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
+ * Returns a pointer to the newly allocated DPI encoder, or NULL on error.
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ const struct panel_funcs *p_funcs)
+{
+ struct mdfld_dsi_dpi_output *dpi_output = NULL;
+ struct mdfld_dsi_config *dsi_config;
+ struct drm_connector *connector = NULL;
+ struct drm_encoder *encoder = NULL;
+ int pipe;
+ u32 data;
+ int ret;
+
+ pipe = dsi_connector->pipe;
+
+ if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+
+ /* panel hard-reset */
+ if (p_funcs->reset) {
+ ret = p_funcs->reset(pipe);
+ if (ret) {
+ DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+ return NULL;
+ }
+ }
+
+ /* panel drvIC init */
+ if (p_funcs->drv_ic_init)
+ p_funcs->drv_ic_init(dsi_config, pipe);
+
+ /* panel power mode detect */
+ ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
+ if (ret) {
+ DRM_ERROR("Panel %d get power mode failed\n", pipe);
+ dsi_connector->status = connector_status_disconnected;
+ } else {
+ DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+ dsi_connector->status = connector_status_connected;
+ }
+ }
+
+ dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
+ if (!dpi_output) {
+ DRM_ERROR("No memory\n");
+ return NULL;
+ }
+
+	/* The panel starts off regardless of which pipe drives it */
+	dpi_output->panel_on = 0;
+
+ dpi_output->dev = dev;
+ if (mdfld_get_panel_type(dev, pipe) != TC35876X)
+ dpi_output->p_funcs = p_funcs;
+ dpi_output->first_boot = 1;
+
+ /*get fixed mode*/
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+
+ /*create drm encoder object*/
+ connector = &dsi_connector->base.base;
+ encoder = &dpi_output->base.base.base;
+ drm_encoder_init(dev,
+ encoder,
+ p_funcs->encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(encoder,
+ p_funcs->encoder_helper_funcs);
+
+ /*attach to given connector*/
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ /*set possible crtcs and clones*/
+ if (dsi_connector->pipe) {
+ encoder->possible_crtcs = (1 << 2);
+ encoder->possible_clones = (1 << 1);
+ } else {
+ encoder->possible_crtcs = (1 << 0);
+ encoder->possible_clones = (1 << 0);
+ }
+
+ dsi_connector->base.encoder = &dpi_output->base.base;
+
+ return &dpi_output->base;
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
new file mode 100644
index 0000000..6f76247
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DPI_H__
+#define __MDFLD_DSI_DPI_H__
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+struct mdfld_dsi_dpi_timing {
+ u16 hsync_count;
+ u16 hbp_count;
+ u16 hfp_count;
+ u16 hactive_count;
+ u16 vsync_count;
+ u16 vbp_count;
+ u16 vfp_count;
+};
+
+struct mdfld_dsi_dpi_output {
+ struct mdfld_dsi_encoder base;
+ struct drm_device *dev;
+
+ int panel_on;
+ int first_boot;
+
+ const struct panel_funcs *p_funcs;
+};
+
+#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
+ container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
+
+/* Export functions */
+extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+ struct mdfld_dsi_dpi_timing *dpi_timing,
+ int num_lane, int bpp);
+extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ const struct panel_funcs *p_funcs);
+
+/* MDFLD DPI helper functions */
+extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
+extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
+ int pipe);
+extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
new file mode 100644
index 0000000..4c2cb4a
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/module.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "tc35876x-dsi-lvds.h"
+#include <linux/pm_runtime.h>
+#include <asm/intel_scu_ipc.h>
+
+/* get the LABC from command line. */
+static int LABC_control = 1;
+
+#ifdef MODULE
+module_param(LABC_control, int, 0644);
+#else
+
+static int __init parse_LABC_control(char *arg)
+{
+	/* LABC control can be passed in as a cmdline parameter:
+	 * add LABC=1 to the cmdline to enable this feature,
+	 * add LABC=0 to disable it. */
+ if (!arg)
+ return -EINVAL;
+
+ if (!strcasecmp(arg, "0"))
+ LABC_control = 0;
+ else if (!strcasecmp(arg, "1"))
+ LABC_control = 1;
+
+ return 0;
+}
+early_param("LABC", parse_LABC_control);
+#endif
+
+/**
+ * Check and see if the generic control or data buffer is empty and ready.
+ */
+void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
+ u32 fifo_stat)
+{
+ u32 GEN_BF_time_out_count;
+
+	/* Check MIPI Adapter command registers */
+ for (GEN_BF_time_out_count = 0;
+ GEN_BF_time_out_count < GEN_FB_TIME_OUT;
+ GEN_BF_time_out_count++) {
+ if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
+ break;
+ udelay(100);
+ }
+
+ if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
+ DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
+ gen_fifo_stat_reg);
+}
+
+/**
+ * Manage the DSI MIPI keyboard and display brightness.
+ * FIXME: this is exported to OSPM code. Should work out a specific
+ * display interface to OSPM.
+ */
+
+void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+			mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_psb_private *dev_priv;
+	u32 gen_ctrl_val;
+
+	/* Validate the sender before dereferencing it */
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	dev_priv = sender->dev->dev_private;
+
+ /* Set default display backlight value to 85% (0xd8)*/
+ mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
+ true);
+
+ /* Set minimum brightness setting of CABC function to 20% (0x33)*/
+ mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
+
+ /* Enable backlight or/and LABC */
+ gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
+ BACKLIGHT_ON;
+	if (LABC_control == 1)
+		gen_ctrl_val |= DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO |
+				AMBIENT_LIGHT_SENSE_ON;
+
+ dev_priv->mipi_ctrl_display = gen_ctrl_val;
+
+ mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
+ 1, true);
+
+ mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
+}
+
+void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
+{
+ struct mdfld_dsi_pkg_sender *sender;
+ struct drm_psb_private *dev_priv;
+ struct mdfld_dsi_config *dsi_config;
+ u32 gen_ctrl_val = 0;
+ int p_type = TMD_VID;
+
+ if (!dev || (pipe != 0 && pipe != 2)) {
+ DRM_ERROR("Invalid parameter\n");
+ return;
+ }
+
+ p_type = mdfld_get_panel_type(dev, 0);
+
+ dev_priv = dev->dev_private;
+
+ if (pipe)
+ dsi_config = dev_priv->dsi_configs[1];
+ else
+ dsi_config = dev_priv->dsi_configs[0];
+
+ sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if (!sender) {
+ DRM_ERROR("No sender found\n");
+ return;
+ }
+
+ gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
+
+ dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
+ pipe, gen_ctrl_val);
+
+ if (p_type == TMD_VID) {
+ /* Set display backlight value */
+ mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
+ (u8)gen_ctrl_val, 1, true);
+ } else {
+ /* Set display backlight value */
+ mdfld_dsi_send_mcs_short(sender, write_display_brightness,
+ (u8)gen_ctrl_val, 1, true);
+
+ /* Enable backlight control */
+ if (level == 0)
+ gen_ctrl_val = 0;
+ else
+ gen_ctrl_val = dev_priv->mipi_ctrl_display;
+
+ mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
+ (u8)gen_ctrl_val, 1, true);
+ }
+}
+
+static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+ u8 dcs, u32 *data, bool hs)
+{
+ struct mdfld_dsi_pkg_sender *sender
+ = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if (!sender || !data) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
+}
+
+int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
+ bool hs)
+{
+ if (!dsi_config || !mode) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
+}
+
+/*
+ * NOTE: this function is used by OSPM.
+ * TODO: remove it later, once proper display interfaces for OSPM are worked out.
+ */
+void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
+ DRM_ERROR("Invalid parameters\n");
+ return;
+ }
+
+ mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+}
+
+static void mdfld_dsi_connector_save(struct drm_connector *connector)
+{
+}
+
+static void mdfld_dsi_connector_restore(struct drm_connector *connector)
+{
+}
+
+/* FIXME: start using the force parameter */
+static enum drm_connector_status
+mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct mdfld_dsi_connector *dsi_connector
+ = mdfld_dsi_connector(connector);
+
+ dsi_connector->status = connector_status_connected;
+
+ return dsi_connector->status;
+}
+
+static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct backlight_device *psb_bd;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *psb_crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ bool center_change;
+ uint64_t val;
+
+ if (!psb_crtc)
+ goto set_prop_error;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ goto set_prop_error;
+ }
+
+ if (drm_connector_property_get_value(connector, property, &val))
+ goto set_prop_error;
+
+ if (val == value)
+ goto set_prop_done;
+
+ if (drm_connector_property_set_value(connector,
+ property, value))
+ goto set_prop_error;
+
+ center_change = (val == DRM_MODE_SCALE_NO_SCALE) ||
+ (value == DRM_MODE_SCALE_NO_SCALE);
+
+ if (psb_crtc->saved_mode.hdisplay != 0 &&
+ psb_crtc->saved_mode.vdisplay != 0) {
+ if (center_change) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &psb_crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ goto set_prop_error;
+ } else {
+ struct drm_encoder_helper_funcs *funcs =
+ encoder->helper_private;
+ funcs->mode_set(encoder,
+ &psb_crtc->saved_mode,
+ &psb_crtc->saved_adjusted_mode);
+ }
+ }
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ if (drm_connector_property_set_value(connector, property,
+ value))
+ goto set_prop_error;
+ else {
+ psb_bd = mdfld_get_backlight_device();
+ if (psb_bd) {
+ psb_bd->props.brightness = value;
+ mdfld_set_brightness(psb_bd);
+ }
+ }
+ }
+set_prop_done:
+ return 0;
+set_prop_error:
+ return -1;
+}
+
+static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_pkg_sender *sender;
+
+ if (!dsi_connector)
+ return;
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ sender = dsi_connector->pkg_sender;
+ mdfld_dsi_pkg_sender_destroy(sender);
+ kfree(dsi_connector);
+}
+
+static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_get_config(dsi_connector);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+ struct drm_display_mode *dup_mode = NULL;
+ struct drm_device *dev = connector->dev;
+
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (fixed_mode) {
+ dev_dbg(dev->dev, "fixed_mode %dx%d\n",
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+ dup_mode = drm_mode_duplicate(dev, fixed_mode);
+ drm_mode_probed_add(connector, dup_mode);
+ return 1;
+ }
+ DRM_ERROR("Didn't get any modes!\n");
+ return 0;
+}
+
+static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_get_config(dsi_connector);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * FIXME: the current display controller has no panel fitting unit,
+ * so reject any mode setting request that does not match the fixed
+ * mode. Will figure out a way to do up-scaling (panel fitting)
+ * later.
+ */
+ if (fixed_mode) {
+ if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (mode == connector->dpms)
+ return;
+
+ /*first, execute dpms*/
+
+ drm_helper_connector_dpms(connector, mode);
+}
+
+static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct mdfld_dsi_connector *dsi_connector =
+ mdfld_dsi_connector(connector);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_get_config(dsi_connector);
+ return &dsi_config->encoder->base.base;
+}
+
+/*DSI connector funcs*/
+static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+ .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+ .save = mdfld_dsi_connector_save,
+ .restore = mdfld_dsi_connector_restore,
+ .detect = mdfld_dsi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = mdfld_dsi_connector_set_property,
+ .destroy = mdfld_dsi_connector_destroy,
+};
+
+/*DSI connector helper funcs*/
+static const struct drm_connector_helper_funcs
+ mdfld_dsi_connector_helper_funcs = {
+ .get_modes = mdfld_dsi_connector_get_modes,
+ .mode_valid = mdfld_dsi_connector_mode_valid,
+ .best_encoder = mdfld_dsi_connector_best_encoder,
+};
+
+static int mdfld_dsi_get_default_config(struct drm_device *dev,
+ struct mdfld_dsi_config *config, int pipe)
+{
+ if (!dev || !config) {
+ DRM_ERROR("Invalid parameters");
+ return -EINVAL;
+ }
+
+ config->bpp = 24;
+ if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ config->lane_count = 4;
+ else
+ config->lane_count = 2;
+ config->channel_num = 0;
+
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+ config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
+ else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+ config->video_mode =
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
+ else
+ config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
+
+ return 0;
+}
+
+int mdfld_dsi_panel_reset(int pipe)
+{
+ unsigned gpio;
+ int ret = 0;
+
+ switch (pipe) {
+ case 0:
+ gpio = 128;
+ break;
+ case 2:
+ gpio = 34;
+ break;
+ default:
+ DRM_ERROR("Invalid output\n");
+ return -EINVAL;
+ }
+
+ ret = gpio_request(gpio, "gfx");
+ if (ret) {
+ DRM_ERROR("gpio_rqueset failed\n");
+ return ret;
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret) {
+ DRM_ERROR("gpio_direction_output failed\n");
+ goto gpio_error;
+ }
+
+ gpio_get_value(gpio);
+
+gpio_error:
+ if (gpio_is_valid(gpio))
+ gpio_free(gpio);
+
+ return ret;
+}
+
+/*
+ * MIPI output init
+ * @dev: drm device
+ * @pipe: pipe number, 0 or 2
+ * @p_vid_funcs: panel callbacks for this output
+ *
+ * Initialize a MIPI (DSI) output on @pipe, including the creation of its
+ * DRM mode objects.
+ */
+void mdfld_dsi_output_init(struct drm_device *dev,
+ int pipe,
+ const struct panel_funcs *p_vid_funcs)
+{
+ struct mdfld_dsi_config *dsi_config;
+ struct mdfld_dsi_connector *dsi_connector;
+ struct drm_connector *connector;
+ struct mdfld_dsi_encoder *encoder;
+ struct drm_psb_private *dev_priv;
+ struct panel_info dsi_panel_info;
+ u32 width_mm, height_mm;
+
+ /* Validate the parameters before dereferencing dev */
+ if (!dev || ((pipe != 0) && (pipe != 2))) {
+ DRM_ERROR("Invalid parameter\n");
+ return;
+ }
+
+ dev_priv = dev->dev_private;
+
+ dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
+
+ /*create a new connector*/
+ dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
+ if (!dsi_connector) {
+ DRM_ERROR("No memory");
+ return;
+ }
+
+ dsi_connector->pipe = pipe;
+
+ dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
+ GFP_KERNEL);
+ if (!dsi_config) {
+ DRM_ERROR("cannot allocate memory for DSI config\n");
+ goto dsi_init_err0;
+ }
+ mdfld_dsi_get_default_config(dev, dsi_config, pipe);
+
+ dsi_connector->private = dsi_config;
+
+ dsi_config->changed = 1;
+ dsi_config->dev = dev;
+
+ dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
+ if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+ goto dsi_init_err0;
+
+ width_mm = dsi_panel_info.width_mm;
+ height_mm = dsi_panel_info.height_mm;
+
+ dsi_config->mode = dsi_config->fixed_mode;
+ dsi_config->connector = dsi_connector;
+
+ if (!dsi_config->fixed_mode) {
+ DRM_ERROR("No pannel fixed mode was found\n");
+ goto dsi_init_err0;
+ }
+
+ if (pipe && dev_priv->dsi_configs[0]) {
+ dsi_config->dvr_ic_inited = 0;
+ dev_priv->dsi_configs[1] = dsi_config;
+ } else if (pipe == 0) {
+ dsi_config->dvr_ic_inited = 1;
+ dev_priv->dsi_configs[0] = dsi_config;
+ } else {
+ DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
+ goto dsi_init_err0;
+ }
+
+ connector = &dsi_connector->base.base;
+ drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->display_info.width_mm = width_mm;
+ connector->display_info.height_mm = height_mm;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*attach properties*/
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+ /*init DSI package sender on this output*/
+ if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
+ DRM_ERROR("Package Sender initialization failed on pipe %d\n",
+ pipe);
+ goto dsi_init_err0;
+ }
+
+ encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
+ if (!encoder) {
+ DRM_ERROR("Create DPI encoder failed\n");
+ goto dsi_init_err1;
+ }
+ encoder->private = dsi_config;
+ dsi_config->encoder = encoder;
+ encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
+ INTEL_OUTPUT_MIPI2;
+ drm_sysfs_connector_add(connector);
+ return;
+
+ /*TODO: add code to destroy outputs on error*/
+dsi_init_err1:
+ /*destroy sender*/
+ mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
+
+ drm_connector_cleanup(connector);
+
+ kfree(dsi_config->fixed_mode);
+ kfree(dsi_config);
+dsi_init_err0:
+ kfree(dsi_connector);
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
new file mode 100644
index 0000000..21071ce
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_OUTPUT_H__
+#define __MDFLD_DSI_OUTPUT_H__
+
+#include <linux/backlight.h>
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "mdfld_output.h"
+
+#include <asm/mrst.h>
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define REG_FLD_MOD(reg, val, start, end) \
+ REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
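+
+/*
+ * Illustrative examples of the field helpers above (not part of the
+ * original patch, shown only to clarify the bit arithmetic):
+ *
+ *   FLD_MASK(5, 0)               == 0x0000003f
+ *   FLD_VAL(0x29, 5, 0)          == 0x00000029
+ *   FLD_GET(0x1234, 15, 8)       == 0x12
+ *   FLD_MOD(0xffffffff, 0, 7, 4) == 0xffffff0f
+ */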
+
+static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
+ u32 val, int start, int end)
+{
+ int t = 100000;
+
+ while (FLD_GET(REG_READ(reg), start, end) != val) {
+ if (--t == 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+#define REG_FLD_WAIT(reg, val, start, end) \
+ REGISTER_FLD_WAIT(dev, reg, val, start, end)
+
+#define REG_BIT_WAIT(reg, val, bitnum) \
+ REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
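+
+/*
+ * Illustrative use (not part of the original patch): with a drm_device
+ * pointer named "dev" in scope, the following would poll bit 0
+ * (DSI_DEVICE_READY) of the device ready register until it reads 1,
+ * returning non-zero on timeout:
+ *
+ *   REG_BIT_WAIT(MIPI_DEVICE_READY_REG(pipe), 1, 0)
+ */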
+
+#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
+
+#ifdef DEBUG
+#define CHECK_PIPE(pipe) ({ \
+ const typeof(pipe) __pipe = (pipe); \
+ BUG_ON(__pipe != 0 && __pipe != 2); \
+ __pipe; })
+#else
+#define CHECK_PIPE(pipe) (pipe)
+#endif
+
+/*
+ * Actual MIPIA->MIPIC reg offset is 0x800, value 0x400 is valid for 0 and 2
+ */
+#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
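+
+/*
+ * Worked example (illustrative only): REG_OFFSET(0) == 0x000 and
+ * REG_OFFSET(2) == 0x800, so e.g. MIPI_DEVICE_READY_REG(0) == 0xb000
+ * while MIPI_DEVICE_READY_REG(2) == 0xb800.
+ */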
+
+/* mdfld DSI controller registers */
+#define MIPI_DEVICE_READY_REG(pipe) (0xb000 + REG_OFFSET(pipe))
+#define MIPI_INTR_STAT_REG(pipe) (0xb004 + REG_OFFSET(pipe))
+#define MIPI_INTR_EN_REG(pipe) (0xb008 + REG_OFFSET(pipe))
+#define MIPI_DSI_FUNC_PRG_REG(pipe) (0xb00c + REG_OFFSET(pipe))
+#define MIPI_HS_TX_TIMEOUT_REG(pipe) (0xb010 + REG_OFFSET(pipe))
+#define MIPI_LP_RX_TIMEOUT_REG(pipe) (0xb014 + REG_OFFSET(pipe))
+#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe) (0xb018 + REG_OFFSET(pipe))
+#define MIPI_DEVICE_RESET_TIMER_REG(pipe) (0xb01c + REG_OFFSET(pipe))
+#define MIPI_DPI_RESOLUTION_REG(pipe) (0xb020 + REG_OFFSET(pipe))
+#define MIPI_DBI_FIFO_THROTTLE_REG(pipe) (0xb024 + REG_OFFSET(pipe))
+#define MIPI_HSYNC_COUNT_REG(pipe) (0xb028 + REG_OFFSET(pipe))
+#define MIPI_HBP_COUNT_REG(pipe) (0xb02c + REG_OFFSET(pipe))
+#define MIPI_HFP_COUNT_REG(pipe) (0xb030 + REG_OFFSET(pipe))
+#define MIPI_HACTIVE_COUNT_REG(pipe) (0xb034 + REG_OFFSET(pipe))
+#define MIPI_VSYNC_COUNT_REG(pipe) (0xb038 + REG_OFFSET(pipe))
+#define MIPI_VBP_COUNT_REG(pipe) (0xb03c + REG_OFFSET(pipe))
+#define MIPI_VFP_COUNT_REG(pipe) (0xb040 + REG_OFFSET(pipe))
+#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe) (0xb044 + REG_OFFSET(pipe))
+#define MIPI_DPI_CONTROL_REG(pipe) (0xb048 + REG_OFFSET(pipe))
+#define MIPI_DPI_DATA_REG(pipe) (0xb04c + REG_OFFSET(pipe))
+#define MIPI_INIT_COUNT_REG(pipe) (0xb050 + REG_OFFSET(pipe))
+#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe) (0xb054 + REG_OFFSET(pipe))
+#define MIPI_VIDEO_MODE_FORMAT_REG(pipe) (0xb058 + REG_OFFSET(pipe))
+#define MIPI_EOT_DISABLE_REG(pipe) (0xb05c + REG_OFFSET(pipe))
+#define MIPI_LP_BYTECLK_REG(pipe) (0xb060 + REG_OFFSET(pipe))
+#define MIPI_LP_GEN_DATA_REG(pipe) (0xb064 + REG_OFFSET(pipe))
+#define MIPI_HS_GEN_DATA_REG(pipe) (0xb068 + REG_OFFSET(pipe))
+#define MIPI_LP_GEN_CTRL_REG(pipe) (0xb06c + REG_OFFSET(pipe))
+#define MIPI_HS_GEN_CTRL_REG(pipe) (0xb070 + REG_OFFSET(pipe))
+#define MIPI_GEN_FIFO_STAT_REG(pipe) (0xb074 + REG_OFFSET(pipe))
+#define MIPI_HS_LS_DBI_ENABLE_REG(pipe) (0xb078 + REG_OFFSET(pipe))
+#define MIPI_DPHY_PARAM_REG(pipe) (0xb080 + REG_OFFSET(pipe))
+#define MIPI_DBI_BW_CTRL_REG(pipe) (0xb084 + REG_OFFSET(pipe))
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe) (0xb088 + REG_OFFSET(pipe))
+
+#define MIPI_CTRL_REG(pipe) (0xb104 + REG_OFFSET(pipe))
+#define MIPI_DATA_ADD_REG(pipe) (0xb108 + REG_OFFSET(pipe))
+#define MIPI_DATA_LEN_REG(pipe) (0xb10c + REG_OFFSET(pipe))
+#define MIPI_CMD_ADD_REG(pipe) (0xb110 + REG_OFFSET(pipe))
+#define MIPI_CMD_LEN_REG(pipe) (0xb114 + REG_OFFSET(pipe))
+
+/* non-uniform reg offset */
+#define MIPI_PORT_CONTROL(pipe) (CHECK_PIPE(pipe) ? MIPI_C : MIPI)
+
+#define DSI_DEVICE_READY (0x1)
+#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
+#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
+#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
+
+
+#define DSI_ONE_DATA_LANE (0x1)
+#define DSI_TWO_DATA_LANE (0x2)
+#define DSI_THREE_DATA_LANE (0X3)
+#define DSI_FOUR_DATA_LANE (0x4)
+#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
+#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
+#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
+#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
+
+#define DSI_INTR_STATE_RXSOTERROR BIT(0)
+
+#define DSI_INTR_STATE_SPL_PKG_SENT BIT(30)
+#define DSI_INTR_STATE_TE BIT(31)
+
+#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
+
+#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
+
+#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
+
+#define DSI_RESET_TIMER_MASK (0xffff)
+
+#define DSI_DBI_FIFO_WM_HALF (0x0)
+#define DSI_DBI_FIFO_WM_QUARTER (0x1)
+#define DSI_DBI_FIFO_WM_LOW (0x2)
+
+#define DSI_DPI_TIMING_MASK (0xffff)
+
+#define DSI_INIT_TIMER_MASK (0xffff)
+
+#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
+
+#define DSI_LP_BYTECLK_MASK (0x0ffff)
+
+#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
+#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
+#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
+#define DSI_HS_CTRL_GEN_R0 (0x04)
+#define DSI_HS_CTRL_GEN_R1 (0x14)
+#define DSI_HS_CTRL_GEN_R2 (0x24)
+#define DSI_HS_CTRL_GEN_LONG_W (0x29)
+#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
+#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
+#define DSI_HS_CTRL_MCS_R0 (0x06)
+#define DSI_HS_CTRL_MCS_LONG_W (0x39)
+#define DSI_HS_CTRL_VC_OFFSET (0x06)
+#define DSI_HS_CTRL_WC_OFFSET (0x08)
+
+#define DSI_FIFO_GEN_HS_DATA_FULL BIT(0)
+#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY BIT(1)
+#define DSI_FIFO_GEN_HS_DATA_EMPTY BIT(2)
+#define DSI_FIFO_GEN_LP_DATA_FULL BIT(8)
+#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY BIT(9)
+#define DSI_FIFO_GEN_LP_DATA_EMPTY BIT(10)
+#define DSI_FIFO_GEN_HS_CTRL_FULL BIT(16)
+#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY BIT(17)
+#define DSI_FIFO_GEN_HS_CTRL_EMPTY BIT(18)
+#define DSI_FIFO_GEN_LP_CTRL_FULL BIT(24)
+#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY BIT(25)
+#define DSI_FIFO_GEN_LP_CTRL_EMPTY BIT(26)
+#define DSI_FIFO_DBI_EMPTY BIT(27)
+#define DSI_FIFO_DPI_EMPTY BIT(28)
+
+#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
+
+#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
+#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
+
+#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
+#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
+
+/*dsi power modes*/
+#define DSI_POWER_MODE_DISPLAY_ON BIT(2)
+#define DSI_POWER_MODE_NORMAL_ON BIT(3)
+#define DSI_POWER_MODE_SLEEP_OUT BIT(4)
+#define DSI_POWER_MODE_PARTIAL_ON BIT(5)
+#define DSI_POWER_MODE_IDLE_ON BIT(6)
+
+enum {
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
+ MDFLD_DSI_VIDEO_BURST_MODE = 3,
+};
+
+#define DSI_DPI_COMPLETE_LAST_LINE BIT(2)
+#define DSI_DPI_DISABLE_BTA BIT(3)
+
+struct mdfld_dsi_connector {
+ struct psb_intel_connector base;
+
+ int pipe;
+ void *private;
+ void *pkg_sender;
+
+ /* Connection status */
+ enum drm_connector_status status;
+};
+
+struct mdfld_dsi_encoder {
+ struct psb_intel_encoder base;
+ void *private;
+};
+
+/*
+ * DSI config, consisting of one DSI connector and two DSI encoders.
+ * DRM will pick up the DSI encoder based on the different configs.
+ */
+struct mdfld_dsi_config {
+ struct drm_device *dev;
+ struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *mode;
+
+ struct mdfld_dsi_connector *connector;
+ struct mdfld_dsi_encoder *encoder;
+
+ int changed;
+
+ int bpp;
+ int lane_count;
+ /*Virtual channel number for this encoder*/
+ int channel_num;
+ /*video mode configure*/
+ int video_mode;
+
+ int dvr_ic_inited;
+};
+
+static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
+ struct drm_connector *connector)
+{
+ struct psb_intel_connector *psb_connector;
+
+ psb_connector = to_psb_intel_connector(connector);
+
+ return container_of(psb_connector, struct mdfld_dsi_connector, base);
+}
+
+static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
+ struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *psb_encoder;
+
+ psb_encoder = to_psb_intel_encoder(encoder);
+
+ return container_of(psb_encoder, struct mdfld_dsi_encoder, base);
+}
+
+static inline struct mdfld_dsi_config *
+ mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
+{
+ if (!connector)
+ return NULL;
+ return (struct mdfld_dsi_config *)connector->private;
+}
+
+static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
+{
+ struct mdfld_dsi_connector *dsi_connector;
+
+ if (!config)
+ return NULL;
+
+ dsi_connector = config->connector;
+
+ if (!dsi_connector)
+ return NULL;
+
+ return dsi_connector->pkg_sender;
+}
+
+static inline struct mdfld_dsi_config *
+ mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
+{
+ if (!encoder)
+ return NULL;
+ return (struct mdfld_dsi_config *)encoder->private;
+}
+
+static inline struct mdfld_dsi_connector *
+ mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_config *config;
+
+ if (!encoder)
+ return NULL;
+
+ config = mdfld_dsi_encoder_get_config(encoder);
+ if (!config)
+ return NULL;
+
+ return config->connector;
+}
+
+static inline void *mdfld_dsi_encoder_get_pkg_sender(
+ struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_config *dsi_config;
+
+ dsi_config = mdfld_dsi_encoder_get_config(encoder);
+ if (!dsi_config)
+ return NULL;
+
+ return mdfld_dsi_get_pkg_sender(dsi_config);
+}
+
+static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_connector *connector;
+
+ if (!encoder)
+ return -1;
+
+ connector = mdfld_dsi_encoder_get_connector(encoder);
+ if (!connector)
+ return -1;
+ return connector->pipe;
+}
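+
+/*
+ * Illustrative helper chain (not part of the original patch): from a DRM
+ * encoder callback the DSI objects can be reached as follows:
+ *
+ *   struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+ *   struct mdfld_dsi_config *config =
+ *       mdfld_dsi_encoder_get_config(dsi_encoder);
+ *   int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+ */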
+
+/* Export functions */
+extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+ u32 gen_fifo_stat_reg, u32 fifo_stat);
+extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+ int level);
+extern void mdfld_dsi_output_init(struct drm_device *dev,
+ int pipe,
+ const struct panel_funcs *p_vid_funcs);
+extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+
+extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+ u32 *mode, bool hs);
+extern int mdfld_dsi_panel_reset(int pipe);
+
+#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
new file mode 100644
index 0000000..baa0e14
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/freezer.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dpi.h"
+
+#define MDFLD_DSI_READ_MAX_COUNT 5000
+
+enum data_type {
+ DSI_DT_GENERIC_SHORT_WRITE_0 = 0x03,
+ DSI_DT_GENERIC_SHORT_WRITE_1 = 0x13,
+ DSI_DT_GENERIC_SHORT_WRITE_2 = 0x23,
+ DSI_DT_GENERIC_READ_0 = 0x04,
+ DSI_DT_GENERIC_READ_1 = 0x14,
+ DSI_DT_GENERIC_READ_2 = 0x24,
+ DSI_DT_GENERIC_LONG_WRITE = 0x29,
+ DSI_DT_DCS_SHORT_WRITE_0 = 0x05,
+ DSI_DT_DCS_SHORT_WRITE_1 = 0x15,
+ DSI_DT_DCS_READ = 0x06,
+ DSI_DT_DCS_LONG_WRITE = 0x39,
+};
+
+enum {
+ MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
+};
+
+enum {
+ MDFLD_DSI_PKG_SENDER_FREE = 0x0,
+ MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
+};
+
+static const char *const dsi_errors[] = {
+ "RX SOT Error",
+ "RX SOT Sync Error",
+ "RX EOT Sync Error",
+ "RX Escape Mode Entry Error",
+ "RX LP TX Sync Error",
+ "RX HS Receive Timeout Error",
+ "RX False Control Error",
+ "RX ECC Single Bit Error",
+ "RX ECC Multibit Error",
+ "RX Checksum Error",
+ "RX DSI Data Type Not Recognised",
+ "RX DSI VC ID Invalid",
+ "TX False Control Error",
+ "TX ECC Single Bit Error",
+ "TX ECC Multibit Error",
+ "TX Checksum Error",
+ "TX DSI Data Type Not Recognised",
+ "TX DSI VC ID invalid",
+ "High Contention",
+ "Low contention",
+ "DPI FIFO Under run",
+ "HS TX Timeout",
+ "LP RX Timeout",
+ "Turn Around ACK Timeout",
+ "ACK With No Error",
+ "RX Invalid TX Length",
+ "RX Prot Violation",
+ "HS Generic Write FIFO Full",
+ "LP Generic Write FIFO Full",
+ "Generic Read Data Avail"
+ "Special Packet Sent",
+ "Tearing Effect",
+};
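+
+/*
+ * Note: dsi_error_handler() below indexes this table with the bit number
+ * of MIPI_INTR_STAT_REG, so entry i must describe interrupt bit i.
+ */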
+
+static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
+ u32 mask)
+{
+ struct drm_device *dev = sender->dev;
+ u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
+ int retry = 0xffff;
+
+ while (retry--) {
+ if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
+ return 0;
+ udelay(100);
+ }
+ DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
+ return -EIO;
+}
+
+static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) |
+ BIT(26) | BIT(27) | BIT(28)));
+}
+
+static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26)));
+}
+
+static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18)));
+}
+
+static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
+{
+ u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+ struct drm_device *dev = sender->dev;
+
+ dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
+
+ switch (mask) {
+ case BIT(0):
+ case BIT(1):
+ case BIT(2):
+ case BIT(3):
+ case BIT(4):
+ case BIT(5):
+ case BIT(6):
+ case BIT(7):
+ case BIT(8):
+ case BIT(9):
+ case BIT(10):
+ case BIT(11):
+ case BIT(12):
+ case BIT(13):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ case BIT(14):
+ /*wait for all fifo empty*/
+ /*wait_for_all_fifos_empty(sender)*/;
+ break;
+ case BIT(15):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ case BIT(16):
+ break;
+ case BIT(17):
+ break;
+ case BIT(18):
+ case BIT(19):
+ dev_dbg(sender->dev->dev, "High/Low contention detected\n");
+ /*wait for contention recovery time*/
+ /*mdelay(10);*/
+ /*wait for all fifo empty*/
+ if (0)
+ wait_for_all_fifos_empty(sender);
+ break;
+ case BIT(20):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ case BIT(21):
+ /*wait for all fifo empty*/
+ /*wait_for_all_fifos_empty(sender);*/
+ break;
+ case BIT(22):
+ break;
+ case BIT(23):
+ case BIT(24):
+ case BIT(25):
+ case BIT(26):
+ case BIT(27):
+ dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
+ REG_WRITE(intr_stat_reg, mask);
+ wait_for_hs_fifos_empty(sender);
+ break;
+ case BIT(28):
+ dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
+ REG_WRITE(intr_stat_reg, mask);
+ wait_for_lp_fifos_empty(sender);
+ break;
+ case BIT(29):
+ case BIT(30):
+ case BIT(31):
+ dev_dbg(sender->dev->dev, "No Action required\n");
+ break;
+ }
+
+ if (mask & REG_READ(intr_stat_reg))
+ dev_dbg(sender->dev->dev,
+ "Cannot clean interrupt 0x%08x\n", mask);
+ return 0;
+}
+
+static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
+{
+ struct drm_device *dev = sender->dev;
+ u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+ u32 mask;
+ u32 intr_stat;
+ int i;
+ int err = 0;
+
+ intr_stat = REG_READ(intr_stat_reg);
+
+ for (i = 0; i < 32; i++) {
+ mask = (0x00000001UL) << i;
+ if (intr_stat & mask) {
+ dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
+ err = handle_dsi_error(sender, mask);
+ if (err)
+ DRM_ERROR("Cannot handle error\n");
+ }
+ }
+ return err;
+}
+
+static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 cmd, u8 param, bool hs)
+{
+ struct drm_device *dev = sender->dev;
+ u32 ctrl_reg;
+ u32 val;
+ u8 virtual_channel = 0;
+
+ if (hs) {
+ ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+
+ /* FIXME: wait_for_hs_fifos_empty(sender); */
+ } else {
+ ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+
+ /* FIXME: wait_for_lp_fifos_empty(sender); */
+ }
+
+ val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
+ FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
+
+ REG_WRITE(ctrl_reg, val);
+
+ return 0;
+}
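+
+/*
+ * Worked example (illustrative only): a DCS "set_display_on" short write,
+ * i.e. data_type == DSI_DT_DCS_SHORT_WRITE_0 (0x05), cmd == 0x29 and
+ * param == 0, packs into val == 0x00002905 before being written to the
+ * HS or LP generic control register.
+ */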
+
+static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, int len, bool hs)
+{
+ struct drm_device *dev = sender->dev;
+ u32 ctrl_reg;
+ u32 data_reg;
+ u32 val;
+ u8 *p;
+ u8 b1, b2, b3, b4;
+ u8 virtual_channel = 0;
+ int i;
+
+ if (hs) {
+ ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+ data_reg = sender->mipi_hs_gen_data_reg;
+
+ /* FIXME: wait_for_hs_fifos_empty(sender); */
+ } else {
+ ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+ data_reg = sender->mipi_lp_gen_data_reg;
+
+ /* FIXME: wait_for_lp_fifos_empty(sender); */
+ }
+
+ p = data;
+ for (i = 0; i < len / 4; i++) {
+ b1 = *p++;
+ b2 = *p++;
+ b3 = *p++;
+ b4 = *p++;
+
+ REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
+ }
+
+ i = len % 4;
+ if (i) {
+ b1 = 0; b2 = 0; b3 = 0;
+
+ switch (i) {
+ case 3:
+ b1 = *p++;
+ b2 = *p++;
+ b3 = *p++;
+ break;
+ case 2:
+ b1 = *p++;
+ b2 = *p++;
+ break;
+ case 1:
+ b1 = *p++;
+ break;
+ }
+
+ REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
+ }
+
+ val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
+ FLD_VAL(data_type, 5, 0);
+
+ REG_WRITE(ctrl_reg, val);
+
+ return 0;
+}
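+
+/*
+ * Worked example (illustrative only): a 5-byte DCS long write payload
+ * { 0x2a, 0x00, 0x00, 0x01, 0xdf } is written to the data register as
+ * 0x0100002a followed by 0x000000df (bytes packed LSB first), and the
+ * control word becomes FLD_VAL(5, 23, 8) | FLD_VAL(0x39, 5, 0), i.e.
+ * 0x00000539.
+ */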
+
+static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len)
+{
+ u8 cmd;
+
+ switch (data_type) {
+ case DSI_DT_DCS_SHORT_WRITE_0:
+ case DSI_DT_DCS_SHORT_WRITE_1:
+ case DSI_DT_DCS_LONG_WRITE:
+ cmd = *data;
+ break;
+ default:
+ return 0;
+ }
+
+ /* this prevents other packages from being sent while the delay runs */
+ sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
+
+ /* wait 120 milliseconds in case exit_sleep_mode was just sent */
+ if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ }
+
+ if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ }
+ return 0;
+}
+
+static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len)
+{
+ u8 cmd;
+
+ switch (data_type) {
+ case DSI_DT_DCS_SHORT_WRITE_0:
+ case DSI_DT_DCS_SHORT_WRITE_1:
+ case DSI_DT_DCS_LONG_WRITE:
+ cmd = *data;
+ break;
+ default:
+ return 0;
+ }
+
+ /*update panel status*/
+ if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+ sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+ sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ } else if (unlikely(cmd == DCS_SOFT_RESET)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(5);
+ }
+
+ sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+ return 0;
+}
+
+static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len, bool hs)
+{
+ int ret;
+
+ /*handle DSI error*/
+ ret = dsi_error_handler(sender);
+ if (ret) {
+ DRM_ERROR("Error handling failed\n");
+ return -EAGAIN;
+ }
+
+ /* send pkg */
+ if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
+ DRM_ERROR("sender is busy\n");
+ return -EAGAIN;
+ }
+
+ ret = send_pkg_prepare(sender, data_type, data, len);
+ if (ret) {
+ DRM_ERROR("send_pkg_prepare error\n");
+ return ret;
+ }
+
+ switch (data_type) {
+ case DSI_DT_GENERIC_SHORT_WRITE_0:
+ case DSI_DT_GENERIC_SHORT_WRITE_1:
+ case DSI_DT_GENERIC_SHORT_WRITE_2:
+ case DSI_DT_GENERIC_READ_0:
+ case DSI_DT_GENERIC_READ_1:
+ case DSI_DT_GENERIC_READ_2:
+ case DSI_DT_DCS_SHORT_WRITE_0:
+ case DSI_DT_DCS_SHORT_WRITE_1:
+ case DSI_DT_DCS_READ:
+ ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
+ break;
+ case DSI_DT_GENERIC_LONG_WRITE:
+ case DSI_DT_DCS_LONG_WRITE:
+ ret = send_long_pkg(sender, data_type, data, len, hs);
+ break;
+ }
+
+ send_pkg_done(sender, data_type, data, len);
+
+ /*FIXME: should I query complete and fifo empty here?*/
+
+ return ret;
+}
+
+int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs)
+{
+ unsigned long flags;
+
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u8 param, u8 param_num, bool hs)
+{
+ u8 data[2];
+ unsigned long flags;
+ u8 data_type;
+
+ if (!sender) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ data[0] = cmd;
+
+ if (param_num) {
+ data_type = DSI_DT_DCS_SHORT_WRITE_1;
+ data[1] = param;
+ } else {
+ data_type = DSI_DT_DCS_SHORT_WRITE_0;
+ data[1] = 0;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, data_type, data, sizeof(data), hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
+ u8 param1, u8 param_num, bool hs)
+{
+ u8 data[2];
+ unsigned long flags;
+ u8 data_type;
+
+ if (!sender || param_num > 2) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ switch (param_num) {
+ case 0:
+ data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+ data[0] = 0;
+ data[1] = 0;
+ break;
+ case 1:
+ data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+ data[0] = param0;
+ data[1] = 0;
+ break;
+ case 2:
+ data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+ data[0] = param0;
+ data[1] = param1;
+ break;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, data_type, data, sizeof(data), hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs)
+{
+ unsigned long flags;
+
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&sender->lock, flags);
+ send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+ u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
+{
+ unsigned long flags;
+ struct drm_device *dev;
+ int i;
+ u32 gen_data_reg;
+ int retry = MDFLD_DSI_READ_MAX_COUNT;
+
+ if (!sender || !data_out || !len_out) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ /* Only dereference the sender once the check above has passed */
+ dev = sender->dev;
+
+ /*
+ * Do the read:
+ * 0) send out the generic read request
+ * 1) poll the read-data-available interrupt
+ * 2) read the data
+ */
+ spin_lock_irqsave(&sender->lock, flags);
+
+ REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
+
+ if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
+ DRM_ERROR("Can NOT clean read data valid interrupt\n");
+
+ /*send out read request*/
+ send_pkg(sender, data_type, data, len, hs);
+
+ /*polling read data avail interrupt*/
+ while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
+ udelay(100);
+ retry--;
+ }
+
+ if (!retry) {
+ spin_unlock_irqrestore(&sender->lock, flags);
+ return -ETIMEDOUT;
+ }
+
+ REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
+
+ /*read data*/
+ if (hs)
+ gen_data_reg = sender->mipi_hs_gen_data_reg;
+ else
+ gen_data_reg = sender->mipi_lp_gen_data_reg;
+
+ for (i = 0; i < len_out; i++)
+ *(data_out + i) = REG_READ(gen_data_reg);
+
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u32 *data, u16 len, bool hs)
+{
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+ data, len, hs);
+}
+
+int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+ int pipe)
+{
+ struct mdfld_dsi_pkg_sender *pkg_sender;
+ struct mdfld_dsi_config *dsi_config;
+ struct drm_device *dev;
+ u32 mipi_val = 0;
+
+ if (!dsi_connector) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ /* Only dereference the connector once the check above has passed */
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+ dev = dsi_config->dev;
+
+ pkg_sender = dsi_connector->pkg_sender;
+
+ if (!pkg_sender || IS_ERR(pkg_sender)) {
+ pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
+ GFP_KERNEL);
+ if (!pkg_sender) {
+ DRM_ERROR("Create DSI pkg sender failed\n");
+ return -ENOMEM;
+ }
+ dsi_connector->pkg_sender = (void *)pkg_sender;
+ }
+
+ pkg_sender->dev = dev;
+ pkg_sender->dsi_connector = dsi_connector;
+ pkg_sender->pipe = pipe;
+ pkg_sender->pkg_num = 0;
+ pkg_sender->panel_mode = 0;
+ pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+ /*init regs*/
+ if (pipe == 0) {
+ pkg_sender->dpll_reg = MRST_DPLL_A;
+ pkg_sender->dspcntr_reg = DSPACNTR;
+ pkg_sender->pipeconf_reg = PIPEACONF;
+ pkg_sender->dsplinoff_reg = DSPALINOFF;
+ pkg_sender->dspsurf_reg = DSPASURF;
+ pkg_sender->pipestat_reg = PIPEASTAT;
+ } else if (pipe == 2) {
+ pkg_sender->dpll_reg = MRST_DPLL_A;
+ pkg_sender->dspcntr_reg = DSPCCNTR;
+ pkg_sender->pipeconf_reg = PIPECCONF;
+ pkg_sender->dsplinoff_reg = DSPCLINOFF;
+ pkg_sender->dspsurf_reg = DSPCSURF;
+ pkg_sender->pipestat_reg = PIPECSTAT;
+ }
+
+ pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
+ pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
+ pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
+ pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
+ pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
+ pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+ pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
+ pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
+ pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
+ pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
+
+ /*init lock*/
+ spin_lock_init(&pkg_sender->lock);
+
+ if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
+ /**
+ * For video mode, don't enable DPI timing output here,
+ * will init the DPI timing output during mode setting.
+ */
+ mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ if (pipe == 0)
+ mipi_val |= 0x2;
+
+ REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
+ REG_READ(MIPI_PORT_CONTROL(pipe));
+
+ /* do dsi controller init */
+ mdfld_dsi_controller_init(dsi_config, pipe);
+ }
+
+ return 0;
+}
+
+void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+ if (!sender || IS_ERR(sender))
+ return;
+
+ /*free*/
+ kfree(sender);
+}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
new file mode 100644
index 0000000..459cd7e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+#ifndef __MDFLD_DSI_PKG_SENDER_H__
+#define __MDFLD_DSI_PKG_SENDER_H__
+
+#include <linux/kthread.h>
+
+#define MDFLD_MAX_DCS_PARAM 8
+
+struct mdfld_dsi_pkg_sender {
+ struct drm_device *dev;
+ struct mdfld_dsi_connector *dsi_connector;
+ u32 status;
+ u32 panel_mode;
+
+ int pipe;
+
+ spinlock_t lock;
+
+ u32 pkg_num;
+
+ /* Registers */
+ u32 dpll_reg;
+ u32 dspcntr_reg;
+ u32 pipeconf_reg;
+ u32 pipestat_reg;
+ u32 dsplinoff_reg;
+ u32 dspsurf_reg;
+
+ u32 mipi_intr_stat_reg;
+ u32 mipi_lp_gen_data_reg;
+ u32 mipi_hs_gen_data_reg;
+ u32 mipi_lp_gen_ctrl_reg;
+ u32 mipi_hs_gen_ctrl_reg;
+ u32 mipi_gen_fifo_stat_reg;
+ u32 mipi_data_addr_reg;
+ u32 mipi_data_len_reg;
+ u32 mipi_cmd_addr_reg;
+ u32 mipi_cmd_len_reg;
+};
+
+/* DCS definitions */
+#define DCS_SOFT_RESET 0x01
+#define DCS_ENTER_SLEEP_MODE 0x10
+#define DCS_EXIT_SLEEP_MODE 0x11
+#define DCS_SET_DISPLAY_OFF 0x28
+#define DCS_SET_DISPLAY_ON 0x29
+#define DCS_SET_COLUMN_ADDRESS 0x2a
+#define DCS_SET_PAGE_ADDRESS 0x2b
+#define DCS_WRITE_MEM_START 0x2c
+#define DCS_SET_TEAR_OFF 0x34
+#define DCS_SET_TEAR_ON 0x35
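+
+/*
+ * Illustrative use (not part of the original patch): turning the panel on
+ * with a DCS short write in high speed mode:
+ *
+ *   mdfld_dsi_send_mcs_short(sender, DCS_SET_DISPLAY_ON, 0, 0, true);
+ */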
+
+extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+ int pipe);
+extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
+int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u8 param, u8 param_num, bool hs);
+int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs);
+int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
+ u8 param1, u8 param_num, bool hs);
+int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+ u32 len, bool hs);
+/* Read interfaces */
+int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+ u32 *data, u16 len, bool hs);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
new file mode 100644
index 0000000..a35a292
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "framebuffer.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+
+/* Hardcoded currently */
+static int ksel = KSEL_CRYSTAL_19;
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct mrst_limit_t {
+ struct psb_intel_range_t dot, m, p1;
+};
+
+struct mrst_clock_t {
+ /* derived values */
+ int dot;
+ int m;
+ int p1;
+};
+
+#define COUNT_MAX 0x10000000
+
+void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+{
+ int count, temp;
+ u32 pipeconf_reg = PIPEACONF;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+ /* FIXME JLIU7_PO */
+ psb_intel_wait_for_vblank(dev);
+ return;
+
+ /* Wait for the pipe disable to take effect. */
+ for (count = 0; count < COUNT_MAX; count++) {
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
+ break;
+ }
+}
+
+void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+{
+ int count, temp;
+ u32 pipeconf_reg = PIPEACONF;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+ /* FIXME JLIU7_PO */
+ psb_intel_wait_for_vblank(dev);
+ return;
+
+ /* Wait for the pipe enable to take effect. */
+ for (count = 0; count < COUNT_MAX; count++) {
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_PIPE_STATE) == 1)
+ break;
+ }
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ return (pfit_control >> 29) & 0x3;
+}
+
+static struct drm_device global_dev;
+
+void mdfld__intel_plane_set_alpha(int enable)
+{
+ struct drm_device *dev = &global_dev;
+ int dspcntr_reg = DSPACNTR;
+ u32 dspcntr;
+
+ dspcntr = REG_READ(dspcntr_reg);
+
+ if (enable) {
+ dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+ dspcntr |= DISPPLANE_32BPP;
+ } else {
+ dspcntr &= ~DISPPLANE_32BPP;
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ }
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+}
+
+static int check_fb(struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return 0;
+
+ switch (fb->bits_per_pixel) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ DRM_ERROR("Unknown color depth\n");
+ return -EINVAL;
+ }
+}
+
+static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_i915_master_private *master_priv; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dsplinoff = DSPALINOFF;
+ int dspsurf = DSPASURF;
+ int dspstride = DSPASTRIDE;
+ int dspcntr_reg = DSPACNTR;
+ u32 dspcntr;
+ int ret;
+
+ memcpy(&global_dev, dev, sizeof(struct drm_device));
+
+ dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ return 0;
+ }
+
+ ret = check_fb(crtc->fb);
+ if (ret)
+ return ret;
+
+ switch (pipe) {
+ case 0:
+ dsplinoff = DSPALINOFF;
+ break;
+ case 1:
+ dsplinoff = DSPBLINOFF;
+ dspsurf = DSPBSURF;
+ dspstride = DSPBSTRIDE;
+ dspcntr_reg = DSPBCNTR;
+ break;
+ case 2:
+ dsplinoff = DSPCLINOFF;
+ dspsurf = DSPCSURF;
+ dspstride = DSPCSTRIDE;
+ dspcntr_reg = DSPCCNTR;
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return -EINVAL;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
+ start, offset, x, y);
+ REG_WRITE(dsplinoff, offset);
+ REG_READ(dsplinoff);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+/*
+ * Disable the pipe, plane and pll.
+ */
+void mdfld_disable_crtc(struct drm_device *dev, int pipe)
+{
+ int dpll_reg = MRST_DPLL_A;
+ int dspcntr_reg = DSPACNTR;
+ int dspbase_reg = MRST_DSPABASE;
+ int pipeconf_reg = PIPEACONF;
+ u32 temp;
+
+ dev_dbg(dev->dev, "pipe = %d\n", pipe);
+
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ dpll_reg = MDFLD_DPLL_B;
+ dspcntr_reg = DSPBCNTR;
+ dspbase_reg = DSPBSURF;
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ dpll_reg = MRST_DPLL_A;
+ dspcntr_reg = DSPCCNTR;
+ dspbase_reg = MDFLD_DSPCBASE;
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (pipe != 1)
+ mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
+ HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* FIXME_JLIU7 MDFLD_PO revisit */
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ temp &= ~PIPEACONF_ENABLE;
+ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+ REG_WRITE(pipeconf_reg, temp);
+ REG_READ(pipeconf_reg);
+
+ /* Wait for the pipe disable to take effect. */
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ if ((pipe != 1 &&
+ !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
+ & PIPEACONF_ENABLE)) || pipe == 1) {
+ temp &= ~(DPLL_VCO_ENABLE);
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to turn off. */
+ /* FIXME_MDFLD PO may need more delay */
+ udelay(500);
+
+ if (!(temp & MDFLD_PWR_GATE_EN)) {
+ /* gating power of DPLL */
+ REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(5000);
+ }
+ }
+ }
+
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = MRST_DPLL_A;
+ int dspcntr_reg = DSPACNTR;
+ int dspbase_reg = MRST_DSPABASE;
+ int pipeconf_reg = PIPEACONF;
+ u32 pipestat_reg = PIPEASTAT;
+ u32 pipeconf = dev_priv->pipeconf[pipe];
+ u32 temp;
+ int timeout = 0;
+
+ dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
+
+/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
+/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ dpll_reg = DPLL_B;
+ dspcntr_reg = DSPBCNTR;
+ dspbase_reg = MRST_DSPBBASE;
+ pipeconf_reg = PIPEBCONF;
+ dpll_reg = MDFLD_DPLL_B;
+ break;
+ case 2:
+ dpll_reg = MRST_DPLL_A;
+ dspcntr_reg = DSPCCNTR;
+ dspbase_reg = MDFLD_DSPCBASE;
+ pipeconf_reg = PIPECCONF;
+ pipestat_reg = PIPECSTAT;
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ /* When ungating the DPLL power, wait 0.5us
+ before enabling the VCO */
+ if (temp & MDFLD_PWR_GATE_EN) {
+ temp &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(dpll_reg, temp);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+
+ /*
+ * Wait for the DSI PLL to lock.
+ * NOTE: pipe 2 shares its PLL with pipe 0, so the lock
+ * status only needs to be polled on pipes 0 and 1.
+ */
+ while ((pipe != 2) && (timeout < 20000) &&
+ !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+ }
+
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(pipeconf_reg, pipeconf);
+
+ /* Wait for the pipe enable to take effect. */
+ mdfldWaitForPipeEnable(dev, pipe);
+ }
+
+ /*workaround for sighting 3741701 Random X blank display*/
+ /*perform w/a in video mode only on pipe A or C*/
+ if (pipe == 0 || pipe == 2) {
+ REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+ msleep(100);
+ if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
+ dev_dbg(dev->dev, "OK");
+ else {
+ dev_dbg(dev->dev, "STUCK!!!!");
+ /*shutdown controller*/
+ temp = REG_READ(dspcntr_reg);
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
+ REG_WRITE(0xb048, 1);
+ msleep(100);
+ temp = REG_READ(pipeconf_reg);
+ temp &= ~PIPEACONF_ENABLE;
+ REG_WRITE(pipeconf_reg, temp);
+ msleep(100); /* Wait for the pipe to disable */
+ REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
+ msleep(100);
+ REG_WRITE(0xb004, REG_READ(0xb004));
+ /* Try to bring the controller back up again */
+ REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
+ temp = REG_READ(dspcntr_reg);
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
+ REG_WRITE(0xb048, 2);
+ msleep(100);
+ temp = REG_READ(pipeconf_reg);
+ temp |= PIPEACONF_ENABLE;
+ REG_WRITE(pipeconf_reg, temp);
+ }
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+ if (pipe != 1)
+ mdfld_dsi_gen_fifo_ready(dev,
+ MIPI_GEN_FIFO_STAT_REG(pipe),
+ HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ temp &= ~PIPEACONF_ENABLE;
+ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+ REG_WRITE(pipeconf_reg, temp);
+ REG_READ(pipeconf_reg);
+
+ /* Wait for the pipe disable to take effect. */
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ if ((pipe != 1 && !((REG_READ(PIPEACONF)
+ | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+ || pipe == 1) {
+ temp &= ~(DPLL_VCO_ENABLE);
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to turn off. */
+ /* FIXME_MDFLD PO may need more delay */
+ udelay(500);
+ }
+ }
+ break;
+ }
+ gma_power_end(dev);
+}
+
+
+#define MDFLD_LIMT_DPLL_19 0
+#define MDFLD_LIMT_DPLL_25 1
+#define MDFLD_LIMT_DPLL_83 2
+#define MDFLD_LIMT_DPLL_100 3
+#define MDFLD_LIMT_DSIPLL_19 4
+#define MDFLD_LIMT_DSIPLL_25 5
+#define MDFLD_LIMT_DSIPLL_83 6
+#define MDFLD_LIMT_DSIPLL_100 7
+
+#define MDFLD_DOT_MIN 19750
+#define MDFLD_DOT_MAX 120000
+#define MDFLD_DPLL_M_MIN_19 113
+#define MDFLD_DPLL_M_MAX_19 155
+#define MDFLD_DPLL_P1_MIN_19 2
+#define MDFLD_DPLL_P1_MAX_19 10
+#define MDFLD_DPLL_M_MIN_25 101
+#define MDFLD_DPLL_M_MAX_25 130
+#define MDFLD_DPLL_P1_MIN_25 2
+#define MDFLD_DPLL_P1_MAX_25 10
+#define MDFLD_DPLL_M_MIN_83 64
+#define MDFLD_DPLL_M_MAX_83 64
+#define MDFLD_DPLL_P1_MIN_83 2
+#define MDFLD_DPLL_P1_MAX_83 2
+#define MDFLD_DPLL_M_MIN_100 64
+#define MDFLD_DPLL_M_MAX_100 64
+#define MDFLD_DPLL_P1_MIN_100 2
+#define MDFLD_DPLL_P1_MAX_100 2
+#define MDFLD_DSIPLL_M_MIN_19 131
+#define MDFLD_DSIPLL_M_MAX_19 175
+#define MDFLD_DSIPLL_P1_MIN_19 3
+#define MDFLD_DSIPLL_P1_MAX_19 8
+#define MDFLD_DSIPLL_M_MIN_25 97
+#define MDFLD_DSIPLL_M_MAX_25 140
+#define MDFLD_DSIPLL_P1_MIN_25 3
+#define MDFLD_DSIPLL_P1_MAX_25 9
+#define MDFLD_DSIPLL_M_MIN_83 33
+#define MDFLD_DSIPLL_M_MAX_83 92
+#define MDFLD_DSIPLL_P1_MIN_83 2
+#define MDFLD_DSIPLL_P1_MAX_83 3
+#define MDFLD_DSIPLL_M_MIN_100 97
+#define MDFLD_DSIPLL_M_MAX_100 140
+#define MDFLD_DSIPLL_P1_MIN_100 3
+#define MDFLD_DSIPLL_P1_MAX_100 9
+
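+/* One limit set per PLL (DPLL for HDMI, DSI PLL for MIPI) and per reference
+ * clock (19.2, 25, 83 or 100 MHz); mdfld_limit() below picks the entry from
+ * ksel and the core frequency.
+ */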
+static const struct mrst_limit_t mdfld_limits[] = {
+ { /* MDFLD_LIMT_DPLL_19 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
+ },
+ { /* MDFLD_LIMT_DPLL_25 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
+ },
+ { /* MDFLD_LIMT_DPLL_83 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
+ },
+ { /* MDFLD_LIMT_DPLL_100 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
+ },
+ { /* MDFLD_LIMT_DSIPLL_19 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
+ },
+ { /* MDFLD_LIMT_DSIPLL_25 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
+ },
+ { /* MDFLD_LIMT_DSIPLL_83 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
+ },
+ { /* MDFLD_LIMT_DSIPLL_100 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
+ },
+};
+
+#define MDFLD_M_MIN 21
+#define MDFLD_M_MAX 180
+static const u32 mdfld_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+ 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+ 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
+ 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+ 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+ 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+ 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+ 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+ 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+ 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+ 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+ 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+ 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+ 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+};
+
+static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
+{
+ const struct mrst_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
+ else if (ksel == KSEL_BYPASS_25)
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 166))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 ||
+ dev_priv->core_freq == 200))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
+ } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
+ else if (ksel == KSEL_BYPASS_25)
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 166))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 ||
+ dev_priv->core_freq == 200))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
+ } else {
+ limit = NULL;
+ dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n");
+ }
+
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
+{
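+ /* Illustrative numbers only: refclk = 19200 (kHz), m = 150, p1 = 3
+ * gives dot = (19200 * 150) / 3 = 960000 kHz. */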
+ clock->dot = (refclk * clock->m) / clock->p1;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false if no suitable divisors are found. The values returned
+ * are the actual m and p1 divisors used to derive the dot clock.
+ */
+static bool
+mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+ struct mrst_clock_t *best_clock)
+{
+ struct mrst_clock_t clock;
+ const struct mrst_limit_t *limit = mdfld_limit(crtc);
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ mdfld_clock(refclk, &clock);
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ return err != target;
+}
+
+static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = psb_intel_crtc->pipe;
+ int fp_reg = MRST_FPA0;
+ int dpll_reg = MRST_DPLL_A;
+ int dspcntr_reg = DSPACNTR;
+ int pipeconf_reg = PIPEACONF;
+ int htot_reg = HTOTAL_A;
+ int hblank_reg = HBLANK_A;
+ int hsync_reg = HSYNC_A;
+ int vtot_reg = VTOTAL_A;
+ int vblank_reg = VBLANK_A;
+ int vsync_reg = VSYNC_A;
+ int dspsize_reg = DSPASIZE;
+ int dsppos_reg = DSPAPOS;
+ int pipesrc_reg = PIPEASRC;
+ u32 *pipeconf = &dev_priv->pipeconf[pipe];
+ u32 *dspcntr = &dev_priv->dspcntr[pipe];
+ int refclk = 0;
+ int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
+ clk_tmp = 0;
+ struct mrst_clock_t clock;
+ bool ok;
+ u32 dpll = 0, fp = 0;
+ bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct psb_intel_encoder *psb_intel_encoder = NULL;
+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int timeout = 0;
+ int ret;
+
+ dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
+
+#if 0
+ if (pipe == 1) {
+ if (!gma_power_begin(dev, true))
+ return 0;
+ android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
+ x, y, old_fb);
+ goto mrst_crtc_mode_set_exit;
+ }
+#endif
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ fp_reg = FPB0;
+ dpll_reg = DPLL_B;
+ dspcntr_reg = DSPBCNTR;
+ pipeconf_reg = PIPEBCONF;
+ htot_reg = HTOTAL_B;
+ hblank_reg = HBLANK_B;
+ hsync_reg = HSYNC_B;
+ vtot_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ dspsize_reg = DSPBSIZE;
+ dsppos_reg = DSPBPOS;
+ pipesrc_reg = PIPEBSRC;
+ fp_reg = MDFLD_DPLL_DIV0;
+ dpll_reg = MDFLD_DPLL_B;
+ break;
+ case 2:
+ dpll_reg = MRST_DPLL_A;
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+ htot_reg = HTOTAL_C;
+ hblank_reg = HBLANK_C;
+ hsync_reg = HSYNC_C;
+ vtot_reg = VTOTAL_C;
+ vblank_reg = VBLANK_C;
+ vsync_reg = VSYNC_C;
+ dspsize_reg = DSPCSIZE;
+ dsppos_reg = DSPCPOS;
+ pipesrc_reg = PIPECSRC;
+ break;
+ default:
+ DRM_ERROR("Illegal Pipe Number.\n");
+ return 0;
+ }
+
+ ret = check_fb(crtc->fb);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
+ adjusted_mode->hdisplay);
+ dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
+ adjusted_mode->vdisplay);
+ dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
+ adjusted_mode->hsync_start);
+ dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
+ adjusted_mode->hsync_end);
+ dev_dbg(dev->dev, "adjusted_htotal = %d\n",
+ adjusted_mode->htotal);
+ dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
+ adjusted_mode->vsync_start);
+ dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
+ adjusted_mode->vsync_end);
+ dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
+ adjusted_mode->vtotal);
+ dev_dbg(dev->dev, "adjusted_clock = %d\n",
+ adjusted_mode->clock);
+ dev_dbg(dev->dev, "hdisplay = %d\n",
+ mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay = %d\n",
+ mode->vdisplay);
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ memcpy(&psb_intel_crtc->saved_mode, mode,
+ sizeof(struct drm_display_mode));
+ memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+ sizeof(struct drm_display_mode));
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector)
+ continue;
+
+ encoder = connector->encoder;
+
+ if (!encoder)
+ continue;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_MIPI:
+ is_mipi = true;
+ break;
+ case INTEL_OUTPUT_MIPI2:
+ is_mipi2 = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ }
+ }
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ if (pipe == 1) {
+ /* FIXME: To make HDMI display with 864x480 (TPO), 480x864
+ * (PYR) or 480x854 (TMD), set the sprite width/height and
+ * source image size registers with the adjusted mode for
+ * pipe B.
+ */
+
+ /*
+ * The defined sprite rectangle must always be completely
+ * contained within the displayable area of the screen image
+ * (frame buffer).
+ */
+ REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
+ /* Set the CRTC with encoder mode. */
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+ | (mode->crtc_vdisplay - 1));
+ } else {
+ REG_WRITE(dspsize_reg,
+ ((mode->crtc_vdisplay - 1) << 16) |
+ (mode->crtc_hdisplay - 1));
+ REG_WRITE(pipesrc_reg,
+ ((mode->crtc_hdisplay - 1) << 16) |
+ (mode->crtc_vdisplay - 1));
+ }
+
+ REG_WRITE(dsppos_reg, 0);
+
+ if (psb_intel_encoder)
+ drm_connector_property_get_value(connector,
+ dev->mode_config.scaling_mode_property, &scalingType);
+
+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+ /* Medfield doesn't have register support for centering so we
+ * need to adjust the h/vblank and h/vsync starts and ends
+ * to get centering
+ */
+ int offsetX = 0, offsetY = 0;
+
+ offsetX = (adjusted_mode->crtc_hdisplay -
+ mode->crtc_hdisplay) / 2;
+ offsetY = (adjusted_mode->crtc_vdisplay -
+ mode->crtc_vdisplay) / 2;
+
+ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
+ offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
+ offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
+ offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
+ offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+ } else {
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ }
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* setup pipeconf */
+ *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+
+ /* Set up the display plane register */
+ *dspcntr = REG_READ(dspcntr_reg);
+ *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
+ *dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ if (is_mipi2)
+ goto mrst_crtc_mode_set_exit;
+ clk = adjusted_mode->clock;
+
+ if (is_hdmi) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
+ refclk = 19200;
+
+ if (is_mipi || is_mipi2)
+ clk_n = 1, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 1, clk_p2 = 10;
+ } else if (ksel == KSEL_BYPASS_25) {
+ refclk = 25000;
+
+ if (is_mipi || is_mipi2)
+ clk_n = 1, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 1, clk_p2 = 10;
+ } else if ((ksel == KSEL_BYPASS_83_100) &&
+ dev_priv->core_freq == 166) {
+ refclk = 83000;
+
+ if (is_mipi || is_mipi2)
+ clk_n = 4, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 4, clk_p2 = 10;
+ } else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 ||
+ dev_priv->core_freq == 200)) {
+ refclk = 100000;
+ if (is_mipi || is_mipi2)
+ clk_n = 4, clk_p2 = 8;
+ else if (is_hdmi)
+ clk_n = 4, clk_p2 = 10;
+ }
+
+ if (is_mipi)
+ clk_byte = dev_priv->bpp / 8;
+ else if (is_mipi2)
+ clk_byte = dev_priv->bpp2 / 8;
+
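+ /* Scale the pixel clock by clk_n, clk_p2 and (for MIPI) the
+ * bytes-per-pixel factor to form the target for the divisor
+ * search below. */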
+ clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+
+ dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
+ clk, clk_n, clk_p2);
+ dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
+ adjusted_mode->clock, clk_tmp);
+
+ ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+
+ if (!ok) {
+ DRM_ERROR
+ ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n");
+ } else {
+ m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
+
+ dev_dbg(dev->dev, "dot clock = %d,"
+ "m = %d, p1 = %d, m_conv = %d.\n",
+ clock.dot, clock.m,
+ clock.p1, m_conv);
+ }
+
+ dpll = REG_READ(dpll_reg);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ dpll &= ~DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+
+ /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ /* reset M1, N1 & P1 */
+ REG_WRITE(fp_reg, 0);
+ dpll &= ~MDFLD_P1_MASK;
+ REG_WRITE(dpll_reg, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ /* When ungating the DPLL power, wait 0.5 us before
+ * enabling the VCO */
+ if (dpll & MDFLD_PWR_GATE_EN) {
+ dpll &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(dpll_reg, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+ dpll = 0;
+
+#if 0 /* FIXME revisit later */
+ if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
+ ksel == KSEL_BYPASS_25)
+ dpll &= ~MDFLD_INPUT_REF_SEL;
+ else if (ksel == KSEL_BYPASS_83_100)
+ dpll |= MDFLD_INPUT_REF_SEL;
+#endif /* FIXME revisit later */
+
+ if (is_hdmi)
+ dpll |= MDFLD_VCO_SEL;
+
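+ /* Divisor register layout as written here: clk_n / 2 in the high
+ * word, the LFSR-coded M value from mdfld_m_converts[] in the low
+ * word. */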
+ fp = (clk_n / 2) << 16;
+ fp |= m_conv;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 2)) << 17;
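+ /* e.g. clock.p1 == 2 sets bit 17, clock.p1 == 3 sets bit 18 */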
+
+#if 0 /* 1080p30 & 720p */
+ dpll = 0x00050000;
+ fp = 0x000001be;
+#endif
+#if 0 /* 480p */
+ dpll = 0x02010000;
+ fp = 0x000000d2;
+#endif
+ } else {
+#if 0 /*DBI_TPO_480x864*/
+ dpll = 0x00020000;
+ fp = 0x00000156;
+#endif /* DBI_TPO_480x864 */ /* get from spec. */
+
+ dpll = 0x00800000;
+ fp = 0x000000c1;
+ }
+
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ dpll |= DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+
+ /* wait for DSI PLL to lock */
+ while (timeout < 20000 &&
+ !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+
+ if (is_mipi)
+ goto mrst_crtc_mode_set_exit;
+
+ dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
+
+ REG_WRITE(pipeconf_reg, *pipeconf);
+ REG_READ(pipeconf_reg);
+
+ /* Wait for the pipe enable to take effect. */
+ REG_WRITE(dspcntr_reg, *dspcntr);
+ psb_intel_wait_for_vblank(dev);
+
+mrst_crtc_mode_set_exit:
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
+ .dpms = mdfld_crtc_dpms,
+ .mode_fixup = psb_intel_crtc_mode_fixup,
+ .mode_set = mdfld_crtc_mode_set,
+ .mode_set_base = mdfld__intel_pipe_set_base,
+ .prepare = psb_intel_crtc_prepare,
+ .commit = psb_intel_crtc_commit,
+};
+
diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c
new file mode 100644
index 0000000..c95966b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_output.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+
+#include "tc35876x-dsi-lvds.h"
+
+int mdfld_get_panel_type(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->mdfld_panel_id;
+}
+
+static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
+ int p_type)
+{
+ switch (p_type) {
+ case TPO_VID:
+ mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
+ break;
+ case TC35876X:
+ tc35876x_init(dev);
+ mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
+ break;
+ case TMD_VID:
+ mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
+ break;
+ case HDMI:
+/* if (dev_priv->mdfld_hdmi_present)
+ mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
+ break;
+ }
+}
+
+
+int mdfld_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* FIXME: hardcoded for now */
+ dev_priv->mdfld_panel_id = TC35876X;
+ /* MIPI panel 1 */
+ mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
+ /* HDMI panel */
+ mdfld_init_panel(dev, 1, HDMI);
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
new file mode 100644
index 0000000..ab2b27c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_output.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef MDFLD_OUTPUT_H
+#define MDFLD_OUTPUT_H
+
+#include "psb_drv.h"
+
+#define TPO_PANEL_WIDTH 84
+#define TPO_PANEL_HEIGHT 46
+#define TMD_PANEL_WIDTH 39
+#define TMD_PANEL_HEIGHT 71
+
+struct mdfld_dsi_config;
+
+enum panel_type {
+ TPO_VID,
+ TMD_VID,
+ HDMI,
+ TC35876X,
+};
+
+struct panel_info {
+ u32 width_mm;
+ u32 height_mm;
+ /* Other info */
+};
+
+struct panel_funcs {
+ const struct drm_encoder_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
+ struct drm_display_mode * (*get_config_mode)(struct drm_device *);
+ int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
+ int (*reset)(int pipe);
+ void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
+};
+
+int mdfld_output_init(struct drm_device *dev);
+
+struct backlight_device *mdfld_get_backlight_device(void);
+int mdfld_set_brightness(struct backlight_device *bd);
+
+int mdfld_get_panel_type(struct drm_device *dev, int pipe);
+
+extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
+
+extern const struct panel_funcs mdfld_tmd_vid_funcs;
+extern const struct panel_funcs mdfld_tpo_vid_funcs;
+
+extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
+#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
new file mode 100644
index 0000000..dc0c6c3
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jim Liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ * Gideon Eaton <eaton.
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false; /* Disable GCT for now */
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ if (use_gct) {
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay + \
+ ((ti->hsync_offset_hi << 8) | \
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start + \
+ ((ti->hsync_pulse_width_hi << 8) | \
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + \
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 480;
+ mode->vdisplay = 854;
+ mode->hsync_start = 487;
+ mode->hsync_end = 490;
+ mode->htotal = 499;
+ mode->vsync_start = 861;
+ mode->vsync_end = 865;
+ mode->vtotal = 873;
+ mode->clock = 33264;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static int tmd_vid_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TMD_PANEL_WIDTH;
+ pi->height_mm = TMD_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_init_TMD_MIPI
+ *
+ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
+ * restore_display_registers. since this function does not
+ * acquire the mutex, it is important that the calling function
+ * does!
+\* ************************************************************************* */
+
+/* FIXME: make the below data u8 instead of u32; note byte order! */
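+/* The u32 arrays below are passed to the DSI sender as raw bytes, so on
+ * this little-endian platform 0x000000b2, for example, goes out as
+ * b2 00 00 00. */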
+static u32 tmd_cmd_mcap_off[] = {0x000000b2};
+static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
+static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
+static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
+static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
+static u32 tmd_cmd_set_mode[] = {0x000000b3};
+static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
+static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
+static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
+static u32 tmd_cmd_set_video_mode[] = {0x00000153};
+/* No automatic backlight control; to be added in the future */
+static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
+static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
+
+static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender
+ = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ DRM_INFO("Enter mdfld init TMD MIPI display.\n");
+
+ if (!sender) {
+ DRM_ERROR("Cannot get sender\n");
+ return;
+ }
+
+ if (dsi_config->dvr_ic_inited)
+ return;
+
+ msleep(3);
+
+ /* FIXME: make the below data u8 instead of u32; note byte order! */
+
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
+ sizeof(tmd_cmd_mcap_off), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
+ sizeof(tmd_cmd_enable_lane_switch), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
+ sizeof(tmd_cmd_set_lane_num), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
+ sizeof(tmd_cmd_pushing_clock0), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
+ sizeof(tmd_cmd_pushing_clock1), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
+ sizeof(tmd_cmd_set_mode), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
+ sizeof(tmd_cmd_set_sync_pulse_mode), false);
+ mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
+ sizeof(tmd_cmd_set_column), false);
+ mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
+ sizeof(tmd_cmd_set_page), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
+ sizeof(tmd_cmd_set_video_mode), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
+ sizeof(tmd_cmd_enable_backlight), false);
+ mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
+ sizeof(tmd_cmd_set_backlight_dimming), false);
+
+ dsi_config->dvr_ic_inited = 1;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+ mdfld_tpo_dpi_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tmd_vid_funcs = {
+ .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
+ .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
+ .get_config_mode = &tmd_vid_get_config_mode,
+ .get_panel_info = tmd_vid_get_panel_info,
+ .reset = mdfld_dsi_panel_reset,
+ .drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
+};
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
new file mode 100644
index 0000000..d8d4170
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+
+static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ if (use_gct) {
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay +
+ ((ti->hsync_offset_hi << 8) |
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start +
+ ((ti->hsync_pulse_width_hi << 8) |
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+ ti->hblank_lo);
+ mode->vsync_start =
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) |
+ ti->vsync_offset_lo);
+ mode->vsync_end =
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) |
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay +
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 864;
+ mode->vdisplay = 480;
+ mode->hsync_start = 873;
+ mode->hsync_end = 876;
+ mode->htotal = 887;
+ mode->vsync_start = 487;
+ mode->vsync_end = 490;
+ mode->vtotal = 499;
+ mode->clock = 33264;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static int tpo_vid_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TPO_PANEL_WIDTH;
+ pi->height_mm = TPO_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+ mdfld_tpo_dpi_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tpo_vid_funcs = {
+ .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
+ .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
+ .get_config_mode = &tpo_vid_get_config_mode,
+ .get_panel_info = tpo_vid_get_panel_info,
+};
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index e80ee82..49bac41 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -270,7 +270,7 @@ out_err1:
return NULL;
}
-void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
__free_page(pt->p);
kfree(pt);
@@ -351,7 +351,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
return pt;
}
-struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
@@ -488,15 +488,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
return pd;
}
-/* Returns the physical address of the PD shared by sgx/msvdx */
-uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
-{
- struct psb_mmu_pd *pd;
-
- pd = psb_mmu_get_default_pd(driver);
- return page_to_pfn(pd->p) << PAGE_SHIFT;
-}
-
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
psb_mmu_free_pagedir(driver->default_pd);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 9d12a3e..a39b0d0 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -115,7 +115,7 @@ static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
clock->dot = (refclk * clock->m) / (14 * clock->p1);
}
-void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
{
pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
prefix, clock->dot, clock->m, clock->p1);
@@ -169,7 +169,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
- bool enabled;
if (!gma_power_begin(dev, true))
return;
@@ -253,8 +252,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
/*Set FIFO Watermarks*/
REG_WRITE(DSPARB, 0x3FFF);
REG_WRITE(DSPFW1, 0x3F88080A);
@@ -310,7 +307,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_lvds = false;
bool is_mipi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct psb_intel_encoder *psb_intel_encoder = NULL;
@@ -340,12 +337,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_SDVO:
is_sdvo = true;
break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_MIPI:
is_mipi = true;
break;
@@ -428,9 +419,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
- dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
- dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
-
if (is_mipi)
goto oaktrail_crtc_mode_set_exit;
@@ -517,7 +505,7 @@ static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 63aea2f..41d1924 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -141,7 +141,7 @@ static const struct backlight_ops oaktrail_ops = {
.update_status = oaktrail_set_brightness,
};
-int oaktrail_backlight_init(struct drm_device *dev)
+static int oaktrail_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
int ret;
@@ -176,10 +176,6 @@ int oaktrail_backlight_init(struct drm_device *dev)
* for power management
*/
-static void oaktrail_init_pm(struct drm_device *dev)
-{
-}
-
/**
* oaktrail_save_display_registers - save registers lost on suspend
* @dev: our DRM device
@@ -190,81 +186,82 @@ static void oaktrail_init_pm(struct drm_device *dev)
static int oaktrail_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
int i;
u32 pp_stat;
/* Display arbitration control + watermarks */
- dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
- dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
- dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
- dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
- dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
- dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
- dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
- dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+ regs->psb.saveDSPARB = PSB_RVDC32(DSPARB);
+ regs->psb.saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ regs->psb.saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ regs->psb.saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ regs->psb.saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ regs->psb.saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ regs->psb.saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
- dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
- dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
- dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
- dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
- dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
- dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
- dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
- dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
- dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
- dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
- dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
- dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
- dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
- dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
- dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
- dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
- dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+ regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
+ regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
+ regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
+ regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
+ regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+ regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+ regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+ regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+ regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+ regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+ regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+ regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+ regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+ regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
+ regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
+ regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+ regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
/* Save cursor regs */
- dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
- dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
- dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+ regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+ regs->psb.saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+ regs->psb.saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
/* Save palette (gamma) */
for (i = 0; i < 256; i++)
- dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+ regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_save(dev);
/* Save performance state */
- dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+ regs->psb.savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
/* LVDS state */
- dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
- dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
- dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
- dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
- dev_priv->saveLVDS = PSB_RVDC32(LVDS);
- dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
- dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
- dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
- dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+ regs->psb.savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+ regs->psb.savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+ regs->psb.savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+ regs->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+ regs->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+ regs->psb.saveLVDS = PSB_RVDC32(LVDS);
+ regs->psb.savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ regs->psb.savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+ regs->psb.savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+ regs->psb.savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
/* HW overlay */
- dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
- dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
- dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
- dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
- dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
- dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
- dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+ regs->psb.saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+ regs->psb.saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+ regs->psb.saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+ regs->psb.saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+ regs->psb.saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+ regs->psb.saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+ regs->psb.saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
/* DPST registers */
- dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+ regs->psb.saveHISTOGRAM_INT_CONTROL_REG =
PSB_RVDC32(HISTOGRAM_INT_CONTROL);
- dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+ regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG =
PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
- dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ regs->psb.savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
if (dev_priv->iLVDS_enable) {
/* Shut down the panel */
@@ -302,79 +299,80 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
static int oaktrail_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_save_area *regs = &dev_priv->regs;
u32 pp_stat;
int i;
/* Display arbitration + watermarks */
- PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
- PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
- PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
- PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
- PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
- PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
- PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
- PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+ PSB_WVDC32(regs->psb.saveDSPARB, DSPARB);
+ PSB_WVDC32(regs->psb.saveDSPFW1, DSPFW1);
+ PSB_WVDC32(regs->psb.saveDSPFW2, DSPFW2);
+ PSB_WVDC32(regs->psb.saveDSPFW3, DSPFW3);
+ PSB_WVDC32(regs->psb.saveDSPFW4, DSPFW4);
+ PSB_WVDC32(regs->psb.saveDSPFW5, DSPFW5);
+ PSB_WVDC32(regs->psb.saveDSPFW6, DSPFW6);
+ PSB_WVDC32(regs->psb.saveCHICKENBIT, DSPCHICKENBIT);
/* Make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
/* set the plls */
- PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
- PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+ PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
+ PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
/* Actually enable it */
- PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+ PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
DRM_UDELAY(150);
/* Restore mode */
- PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
- PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
- PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
- PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
- PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
- PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
- PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
- PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+ PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
+ PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
+ PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
+ PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
+ PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
+ PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
+ PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
/* Restore performance mode*/
- PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+ PSB_WVDC32(regs->psb.savePERF_MODE, MRST_PERF_MODE);
/* Enable the pipe*/
if (dev_priv->iLVDS_enable)
- PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+ PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
/* Set up the plane*/
- PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
- PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
- PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+ PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
+ PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
+ PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
/* Enable the plane */
- PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
- PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+ PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
+ PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
/* Enable Cursor A */
- PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
- PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
- PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+ PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
+ PSB_WVDC32(regs->psb.saveDSPACURSOR_POS, CURAPOS);
+ PSB_WVDC32(regs->psb.saveDSPACURSOR_BASE, CURABASE);
/* Restore palette (gamma) */
for (i = 0; i < 256; i++)
- PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+ PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_restore(dev);
if (dev_priv->iLVDS_enable) {
- PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
- PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
- PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
- PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
- PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
- PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
- PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
- PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
- PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
- PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+ PSB_WVDC32(regs->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+ PSB_WVDC32(regs->psb.saveLVDS, LVDS); /*port 61180h*/
+ PSB_WVDC32(regs->psb.savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(regs->psb.savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+ PSB_WVDC32(regs->psb.savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+ PSB_WVDC32(regs->saveBLC_PWM_CTL, BLC_PWM_CTL);
+ PSB_WVDC32(regs->psb.savePP_ON_DELAYS, LVDSPP_ON);
+ PSB_WVDC32(regs->psb.savePP_OFF_DELAYS, LVDSPP_OFF);
+ PSB_WVDC32(regs->psb.savePP_DIVISOR, PP_CYCLE);
+ PSB_WVDC32(regs->psb.savePP_CONTROL, PP_CONTROL);
}
/* Wait for cycle delay */
@@ -388,20 +386,20 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
} while (pp_stat & 0x10000000);
/* Restore HW overlay */
- PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
- PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
- PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
- PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
- PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
- PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
- PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+ PSB_WVDC32(regs->psb.saveOV_OVADD, OV_OVADD);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC0, OV_OGAMC0);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC1, OV_OGAMC1);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC2, OV_OGAMC2);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC3, OV_OGAMC3);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC4, OV_OGAMC4);
+ PSB_WVDC32(regs->psb.saveOV_OGAMC5, OV_OGAMC5);
/* DPST registers */
- PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+ PSB_WVDC32(regs->psb.saveHISTOGRAM_INT_CONTROL_REG,
HISTOGRAM_INT_CONTROL);
- PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+ PSB_WVDC32(regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG,
HISTOGRAM_LOGIC_CONTROL);
- PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+ PSB_WVDC32(regs->psb.savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
return 0;
}
@@ -502,7 +500,6 @@ const struct psb_ops oaktrail_chip_ops = {
.backlight_init = oaktrail_backlight_init,
#endif
- .init_pm = oaktrail_init_pm,
.save_regs = oaktrail_save_display_registers,
.restore_regs = oaktrail_restore_display_registers,
.power_down = oaktrail_power_down,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 025d309..f8b367b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -125,59 +125,6 @@ static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
.nf = { .min = NF_MIN, .max = NF_MAX },
};
-static void wait_for_vblank(struct drm_device *dev)
-{
- /* FIXME: Can we do this as a sleep ? */
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
-}
-
-static void scu_busy_loop(void *scu_base)
-{
- u32 status = 0;
- u32 loop_count = 0;
-
- status = readl(scu_base + 0x04);
- while (status & 1) {
- udelay(1); /* scu processing time is in few u secods */
- status = readl(scu_base + 0x04);
- loop_count++;
- /* break if scu doesn't reset busy bit after huge retry */
- if (loop_count > 1000) {
- DRM_DEBUG_KMS("SCU IPC timed out");
- return;
- }
- }
-}
-
-static void oaktrail_hdmi_reset(struct drm_device *dev)
-{
- void *base;
- /* FIXME: at least make these defines */
- unsigned int scu_ipc_mmio = 0xff11c000;
- int scu_len = 1024;
-
- base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
- if (base == NULL) {
- DRM_ERROR("failed to map SCU mmio\n");
- return;
- }
-
- /* scu ipc: assert hdmi controller reset */
- writel(0xff11d118, base + 0x0c);
- writel(0x7fffffdf, base + 0x80);
- writel(0x42005, base + 0x0);
- scu_busy_loop(base);
-
- /* scu ipc: de-assert hdmi controller reset */
- writel(0xff11d118, base + 0x0c);
- writel(0x7fffffff, base + 0x80);
- writel(0x42005, base + 0x0);
- scu_busy_loop(base);
-
- iounmap(base);
-}
-
static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -208,104 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
-void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- u32 temp;
-
- switch (mode) {
- case DRM_MODE_DPMS_OFF:
- /* Disable VGACNTRL */
- REG_WRITE(VGACNTRL, 0x80000000);
-
- /* Disable plane */
- temp = REG_READ(DSPBCNTR);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
- REG_READ(DSPBCNTR);
- /* Flush the plane changes */
- REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
- REG_READ(DSPBSURF);
- }
-
- /* Disable pipe B */
- temp = REG_READ(PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
- REG_READ(PIPEBCONF);
- }
-
- /* Disable LNW Pipes, etc */
- temp = REG_READ(PCH_PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
- REG_READ(PCH_PIPEBCONF);
- }
- /* wait for pipe off */
- udelay(150);
- /* Disable dpll */
- temp = REG_READ(DPLL_CTRL);
- if ((temp & DPLL_PWRDN) == 0) {
- REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
- REG_WRITE(DPLL_STATUS, 0x1);
- }
- /* wait for dpll off */
- udelay(150);
- break;
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable dpll */
- temp = REG_READ(DPLL_CTRL);
- if ((temp & DPLL_PWRDN) != 0) {
- REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
- temp = REG_READ(DPLL_CLK_ENABLE);
- REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
- REG_READ(DPLL_CLK_ENABLE);
- }
- /* wait for dpll warm up */
- udelay(150);
-
- /* Enable pipe B */
- temp = REG_READ(PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
- REG_READ(PIPEBCONF);
- }
-
- /* Enable LNW Pipe B */
- temp = REG_READ(PCH_PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
- REG_READ(PCH_PIPEBCONF);
- }
- wait_for_vblank(dev);
-
- /* Enable plane */
- temp = REG_READ(DSPBCNTR);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
- REG_READ(DSPBSURF);
- }
- psb_intel_crtc_load_lut(crtc);
- }
- /* DSPARB */
- REG_WRITE(DSPARB, 0x00003fbf);
- /* FW1 */
- REG_WRITE(0x70034, 0x3f880a0a);
- /* FW2 */
- REG_WRITE(0x70038, 0x0b060808);
- /* FW4 */
- REG_WRITE(0x70050, 0x08030404);
- /* FW5 */
- REG_WRITE(0x70054, 0x04040404);
- /* LNC Chicken Bits */
- REG_WRITE(0x70400, 0x4000);
-}
-
-
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
@@ -327,182 +176,6 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
HDMI_WRITE(HDMI_VIDEO_REG, temp);
}
-static unsigned int htotal_calculate(struct drm_display_mode *mode)
-{
- u32 htotal, new_crtc_htotal;
-
- htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
-
- /*
- * 1024 x 768 new_crtc_htotal = 0x1024;
- * 1280 x 1024 new_crtc_htotal = 0x0c34;
- */
- new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
-
- return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
-}
-
-static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
- int refclk, struct oaktrail_hdmi_clock *best_clock)
-{
- int np_min, np_max, nr_min, nr_max;
- int np, nr, nf;
-
- np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
- np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
- if (np_min < oaktrail_hdmi_limit.np.min)
- np_min = oaktrail_hdmi_limit.np.min;
- if (np_max > oaktrail_hdmi_limit.np.max)
- np_max = oaktrail_hdmi_limit.np.max;
-
- nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
- nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
- if (nr_min < oaktrail_hdmi_limit.nr.min)
- nr_min = oaktrail_hdmi_limit.nr.min;
- if (nr_max > oaktrail_hdmi_limit.nr.max)
- nr_max = oaktrail_hdmi_limit.nr.max;
-
- np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
- nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
- nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
- DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
-
- /*
- * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
- * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
- */
- best_clock->np = np;
- best_clock->nr = nr - 1;
- best_clock->nf = (nf << 14);
-}
-
-int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
- int pipe = 1;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int refclk;
- struct oaktrail_hdmi_clock clock;
- u32 dspcntr, pipeconf, dpll, temp;
- int dspcntr_reg = DSPBCNTR;
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* XXX: Disable the panel fitter if it was on our pipe */
-
- /* Disable dpll if necessary */
- dpll = REG_READ(DPLL_CTRL);
- if ((dpll & DPLL_PWRDN) == 0) {
- REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
- REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
- REG_WRITE(DPLL_STATUS, 0x1);
- }
- udelay(150);
-
- /* reset controller: FIXME - can we sort out the ioremap mess ? */
- iounmap(hdmi_dev->regs);
- oaktrail_hdmi_reset(dev);
-
- /* program and enable dpll */
- refclk = 25000;
- oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
-
- /* Setting DPLL */
- dpll = REG_READ(DPLL_CTRL);
- dpll &= ~DPLL_PDIV_MASK;
- dpll &= ~(DPLL_PWRDN | DPLL_RESET);
- REG_WRITE(DPLL_CTRL, 0x00000008);
- REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
- REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
- REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
- REG_WRITE(DPLL_UPDATE, 0x80000000);
- REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
- udelay(150);
-
- hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
- if (hdmi_dev->regs == NULL) {
- DRM_ERROR("failed to do hdmi mmio mapping\n");
- return -ENOMEM;
- }
-
- /* configure HDMI */
- HDMI_WRITE(0x1004, 0x1fd);
- HDMI_WRITE(0x2000, 0x1);
- HDMI_WRITE(0x2008, 0x0);
- HDMI_WRITE(0x3130, 0x8);
- HDMI_WRITE(0x101c, 0x1800810);
-
- temp = htotal_calculate(adjusted_mode);
- REG_WRITE(htot_reg, temp);
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
- REG_WRITE(pipesrc_reg,
- ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
-
- REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
- REG_WRITE(PCH_PIPEBSRC,
- ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
-
- temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
- HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
-
- REG_WRITE(dspsize_reg,
- ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
-
- /* Flush the plane changes */
- {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- }
-
- /* Set up the display plane register */
- dspcntr = REG_READ(dspcntr_reg);
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPPLANE_SEL_PIPE_B;
- dspcntr |= DISPLAY_PLANE_ENABLE;
-
- /* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
- pipeconf |= PIPEACONF_ENABLE;
-
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
-
- REG_WRITE(PCH_PIPEBCONF, pipeconf);
- REG_READ(PCH_PIPEBCONF);
- wait_for_vblank(dev);
-
- REG_WRITE(dspcntr_reg, dspcntr);
- wait_for_vblank(dev);
-
- return 0;
-}
-
static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -692,7 +365,7 @@ failed_connector:
static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
- {}
+ { 0 }
};
void oaktrail_hdmi_setup(struct drm_device *dev)
@@ -766,6 +439,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct psb_state *regs = &dev_priv->regs.psb;
int i;
/* dpll */
@@ -776,14 +450,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
/* pipe B */
- dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
- dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
- dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
- dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
- dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
- dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
- dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
- dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+ regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+ regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
+ regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
+ regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
+ regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
+ regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
+ regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
+ regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -795,21 +469,21 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
/* plane */
- dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
- dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
- dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
- dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
- dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+ regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+ regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+ regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+ regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+ regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+ regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
/* cursor B */
- dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
- dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
- dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+ regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+ regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+ regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
/* save palette */
for (i = 0; i < 256; i++)
- dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+ regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
}
/* restore HDMI register state */
@@ -817,6 +491,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct psb_state *regs = &dev_priv->regs.psb;
int i;
/* dpll */
@@ -828,13 +503,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
DRM_UDELAY(150);
/* pipe */
- PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
- PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
- PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
- PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B);
- PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
- PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
- PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B);
+ PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
+ PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
+ PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
+ PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B);
+ PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
+ PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
+ PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -844,22 +519,22 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
- PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
/* plane */
- PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
- PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
- PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
- PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
- PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+ PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
+ PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
+ PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
+ PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
+ PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
/* cursor B */
- PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
- PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
- PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+ PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
+ PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS);
+ PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE);
/* restore palette */
for (i = 0; i < 256; i++)
- PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+ PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 7054408..5e84fbd 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -127,7 +127,7 @@ static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
{
struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
- int i, err = 0;
+ int i;
mutex_lock(&i2c_dev->i2c_lock);
@@ -139,9 +139,9 @@ static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
for (i = 0; i < num; i++) {
if (pmsg->len && pmsg->buf) {
if (pmsg->flags & I2C_M_RD)
- err = xfer_read(adap, pmsg);
+ xfer_read(adap, pmsg);
else
- err = xfer_write(adap, pmsg);
+ xfer_write(adap, pmsg);
}
pmsg++; /* next message */
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 238bbe1..654f32b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -192,7 +192,7 @@ static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
gma_power_end(dev);
} else
- ret = ((dev_priv->saveBLC_PWM_CTL &
+ ret = ((dev_priv->regs.saveBLC_PWM_CTL &
BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
@@ -331,7 +331,6 @@ void oaktrail_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_psb_private *dev_priv = dev->dev_private;
struct edid *edid;
- int ret = 0;
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -400,7 +399,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
if (edid) {
drm_mode_connector_update_edid_property(connector,
edid);
- ret = drm_add_edid_modes(connector, edid);
+ drm_add_edid_modes(connector, edid);
kfree(edid);
}
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 9402569..889b854 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -58,7 +58,8 @@ void gma_power_init(struct drm_device *dev)
spin_lock_init(&power_ctrl_lock);
mutex_init(&power_mutex);
- dev_priv->ops->init_pm(dev);
+ if (dev_priv->ops->init_pm)
+ dev_priv->ops->init_pm(dev);
}
/**
@@ -101,9 +102,6 @@ static void gma_resume_display(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->suspended == false)
- return;
-
/* turn on the display power island */
dev_priv->ops->power_up(dev);
dev_priv->suspended = false;
@@ -132,9 +130,9 @@ static void gma_suspend_pci(struct pci_dev *pdev)
pci_save_state(pdev);
pci_read_config_dword(pdev, 0x5C, &bsm);
- dev_priv->saveBSM = bsm;
+ dev_priv->regs.saveBSM = bsm;
pci_read_config_dword(pdev, 0xFC, &vbt);
- dev_priv->saveVBT = vbt;
+ dev_priv->regs.saveVBT = vbt;
pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
@@ -162,8 +160,8 @@ static bool gma_resume_pci(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
- pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+ pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
+ pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
/* restoring MSI address and data in PCIx space */
pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
@@ -195,6 +193,7 @@ int gma_power_suspend(struct device *_dev)
if (!dev_priv->suspended) {
if (dev_priv->display_count) {
mutex_unlock(&power_mutex);
+ dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
return -EBUSY;
}
psb_irq_uninstall(dev);
@@ -302,7 +301,7 @@ int psb_runtime_suspend(struct device *dev)
int psb_runtime_resume(struct device *dev)
{
- return gma_power_resume(dev);;
+ return gma_power_resume(dev);
}
int psb_runtime_idle(struct device *dev)
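gma_power_init() above now treats the init_pm hook as optional and only calls it when the chip ops table provides one, which pairs with the removal of .init_pm from oaktrail_chip_ops earlier in this patch. A minimal sketch of the guarded-hook pattern, with hypothetical names and the driver's usual headers assumed:

/* Sketch only: an optional per-chip hook in a psb_ops-style table. */
struct chip_ops_sketch {
	void (*init_pm)(struct drm_device *dev);	/* may be left NULL */
};

static void power_init_sketch(struct drm_device *dev,
			      const struct chip_ops_sketch *ops)
{
	/* Chips without power-management setup simply omit the hook. */
	if (ops->init_pm)
		ops->init_pm(dev);
}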
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index e5f5906..95d163e 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -177,16 +177,17 @@ static int psb_save_display_registers(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration control + watermarks */
- dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
- dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
- dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
- dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
- dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
- dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
- dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
- dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+ regs->saveDSPARB = PSB_RVDC32(DSPARB);
+ regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ regs->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ regs->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ regs->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ regs->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ regs->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Save crtc and output state */
mutex_lock(&dev->mode_config.mutex);
@@ -213,16 +214,17 @@ static int psb_restore_display_registers(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration + watermarks */
- PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
- PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
- PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
- PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
- PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
- PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
- PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
- PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+ PSB_WVDC32(regs->saveDSPARB, DSPARB);
+ PSB_WVDC32(regs->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(regs->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(regs->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(regs->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(regs->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(regs->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT);
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index f14768f..c34adf9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -60,6 +60,16 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
/* Atom E620 */
{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
#endif
+#if defined(CONFIG_DRM_MEDFIELD)
+ {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+#endif
#if defined(CONFIG_DRM_GMA3600)
{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
@@ -70,7 +80,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
#endif
- { 0, 0, 0}
+ { 0, }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -78,27 +88,27 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
* Standard IOCTLs.
*/
-#define DRM_IOCTL_PSB_ADB \
+#define DRM_IOCTL_GMA_ADB \
DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
-#define DRM_IOCTL_PSB_MODE_OPERATION \
+#define DRM_IOCTL_GMA_MODE_OPERATION \
DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
struct drm_psb_mode_operation_arg)
-#define DRM_IOCTL_PSB_STOLEN_MEMORY \
+#define DRM_IOCTL_GMA_STOLEN_MEMORY \
DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
struct drm_psb_stolen_memory_arg)
-#define DRM_IOCTL_PSB_GAMMA \
+#define DRM_IOCTL_GMA_GAMMA \
DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
struct drm_psb_dpst_lut_arg)
-#define DRM_IOCTL_PSB_DPST_BL \
+#define DRM_IOCTL_GMA_DPST_BL \
DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
uint32_t)
-#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
+#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID \
DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
struct drm_psb_get_pipe_from_crtc_id_arg)
-#define DRM_IOCTL_PSB_GEM_CREATE \
+#define DRM_IOCTL_GMA_GEM_CREATE \
DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
struct drm_psb_gem_create)
-#define DRM_IOCTL_PSB_GEM_MMAP \
+#define DRM_IOCTL_GMA_GEM_MMAP \
DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
struct drm_psb_gem_mmap)
@@ -113,22 +123,19 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-#define PSB_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
-
static struct drm_ioctl_desc psb_ioctls[] = {
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+ DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+ DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+ DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
psb_intel_get_pipe_from_crtc_id, 0),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+ DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+ DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
DRM_UNLOCKED | DRM_AUTH),
};
@@ -268,10 +275,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct drm_psb_private *dev_priv;
unsigned long resource_start;
- struct psb_gtt *pg;
unsigned long irqflags;
int ret = -ENOMEM;
- uint32_t tt_pages;
struct drm_connector *connector;
struct psb_intel_encoder *psb_intel_encoder;
@@ -283,6 +288,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev->dev_private = (void *) dev_priv;
+ pci_set_master(dev->pdev);
+
if (!IS_PSB(dev)) {
if (pci_enable_msi(dev->pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
@@ -327,12 +334,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->mmu)
goto out_err;
- pg = &dev_priv->gtt;
-
- tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
- (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
-
-
dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
if (!dev_priv->pf_pd)
goto out_err;
@@ -409,7 +410,7 @@ out_err:
return ret;
}
-int psb_driver_device_is_agp(struct drm_device *dev)
+static int psb_driver_device_is_agp(struct drm_device *dev)
{
return 0;
}
@@ -600,7 +601,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
/* When a client dies:
* - Check for and clean up flipped page state
*/
-void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
{
}
@@ -677,7 +678,9 @@ static struct pci_driver psb_pci_driver = {
.id_table = pciidlist,
.probe = psb_probe,
.remove = psb_remove,
- .driver.pm = &psb_pm_ops,
+ .driver = {
+ .pm = &psb_pm_ops,
+ }
};
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
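The ioctl table above drops the driver-local PSB_IOCTL_DEF() wrapper in favour of the core DRM_IOCTL_DEF_DRV() macro, which builds both the DRM_GMA_* command index and the DRM_IOCTL_GMA_* number from its first argument; that is why the local defines are renamed from DRM_IOCTL_PSB_* to DRM_IOCTL_GMA_* to match the DRM_GMA_* command names. A rough sketch of the resulting table shape (the real table is in the hunk above):

/* Sketch only: each entry resolves against DRM_GMA_<name> and the
 * matching DRM_IOCTL_GMA_<name> define introduced earlier in the file. */
static struct drm_ioctl_desc ioctl_table_sketch[] = {
	DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
			  DRM_UNLOCKED | DRM_AUTH),
};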
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index eb1568a..40ce2c9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -276,6 +276,217 @@ struct intel_gmbus {
u32 reg0;
};
+/*
+ * Register save state. This is used to hold the context when the
+ * device is powered off. In the case of Oaktrail this can (but does not
+ * yet) include screen blank. Operations occurring during the save
+ * update the register cache instead.
+ */
+struct psb_state {
+ uint32_t saveDSPACNTR;
+ uint32_t saveDSPBCNTR;
+ uint32_t savePIPEACONF;
+ uint32_t savePIPEBCONF;
+ uint32_t savePIPEASRC;
+ uint32_t savePIPEBSRC;
+ uint32_t saveFPA0;
+ uint32_t saveFPA1;
+ uint32_t saveDPLL_A;
+ uint32_t saveDPLL_A_MD;
+ uint32_t saveHTOTAL_A;
+ uint32_t saveHBLANK_A;
+ uint32_t saveHSYNC_A;
+ uint32_t saveVTOTAL_A;
+ uint32_t saveVBLANK_A;
+ uint32_t saveVSYNC_A;
+ uint32_t saveDSPASTRIDE;
+ uint32_t saveDSPASIZE;
+ uint32_t saveDSPAPOS;
+ uint32_t saveDSPABASE;
+ uint32_t saveDSPASURF;
+ uint32_t saveDSPASTATUS;
+ uint32_t saveFPB0;
+ uint32_t saveFPB1;
+ uint32_t saveDPLL_B;
+ uint32_t saveDPLL_B_MD;
+ uint32_t saveHTOTAL_B;
+ uint32_t saveHBLANK_B;
+ uint32_t saveHSYNC_B;
+ uint32_t saveVTOTAL_B;
+ uint32_t saveVBLANK_B;
+ uint32_t saveVSYNC_B;
+ uint32_t saveDSPBSTRIDE;
+ uint32_t saveDSPBSIZE;
+ uint32_t saveDSPBPOS;
+ uint32_t saveDSPBBASE;
+ uint32_t saveDSPBSURF;
+ uint32_t saveDSPBSTATUS;
+ uint32_t saveVCLK_DIVISOR_VGA0;
+ uint32_t saveVCLK_DIVISOR_VGA1;
+ uint32_t saveVCLK_POST_DIV;
+ uint32_t saveVGACNTRL;
+ uint32_t saveADPA;
+ uint32_t saveLVDS;
+ uint32_t saveDVOA;
+ uint32_t saveDVOB;
+ uint32_t saveDVOC;
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePaletteA[256];
+ uint32_t savePaletteB[256];
+ uint32_t saveCLOCKGATING;
+ uint32_t saveDSPARB;
+ uint32_t saveDSPATILEOFF;
+ uint32_t saveDSPBTILEOFF;
+ uint32_t saveDSPAADDR;
+ uint32_t saveDSPBADDR;
+ uint32_t savePFIT_AUTO_RATIOS;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t savePP_ON_DELAYS;
+ uint32_t savePP_OFF_DELAYS;
+ uint32_t savePP_DIVISOR;
+ uint32_t saveBCLRPAT_A;
+ uint32_t saveBCLRPAT_B;
+ uint32_t saveDSPALINOFF;
+ uint32_t saveDSPBLINOFF;
+ uint32_t savePERF_MODE;
+ uint32_t saveDSPFW1;
+ uint32_t saveDSPFW2;
+ uint32_t saveDSPFW3;
+ uint32_t saveDSPFW4;
+ uint32_t saveDSPFW5;
+ uint32_t saveDSPFW6;
+ uint32_t saveCHICKENBIT;
+ uint32_t saveDSPACURSOR_CTRL;
+ uint32_t saveDSPBCURSOR_CTRL;
+ uint32_t saveDSPACURSOR_BASE;
+ uint32_t saveDSPBCURSOR_BASE;
+ uint32_t saveDSPACURSOR_POS;
+ uint32_t saveDSPBCURSOR_POS;
+ uint32_t save_palette_a[256];
+ uint32_t save_palette_b[256];
+ uint32_t saveOV_OVADD;
+ uint32_t saveOV_OGAMC0;
+ uint32_t saveOV_OGAMC1;
+ uint32_t saveOV_OGAMC2;
+ uint32_t saveOV_OGAMC3;
+ uint32_t saveOV_OGAMC4;
+ uint32_t saveOV_OGAMC5;
+ uint32_t saveOVC_OVADD;
+ uint32_t saveOVC_OGAMC0;
+ uint32_t saveOVC_OGAMC1;
+ uint32_t saveOVC_OGAMC2;
+ uint32_t saveOVC_OGAMC3;
+ uint32_t saveOVC_OGAMC4;
+ uint32_t saveOVC_OGAMC5;
+
+ /* DPST register save */
+ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+ uint32_t savePWM_CONTROL_LOGIC;
+};
+
+struct medfield_state {
+ uint32_t saveDPLL_A;
+ uint32_t saveFPA0;
+ uint32_t savePIPEACONF;
+ uint32_t saveHTOTAL_A;
+ uint32_t saveHBLANK_A;
+ uint32_t saveHSYNC_A;
+ uint32_t saveVTOTAL_A;
+ uint32_t saveVBLANK_A;
+ uint32_t saveVSYNC_A;
+ uint32_t savePIPEASRC;
+ uint32_t saveDSPASTRIDE;
+ uint32_t saveDSPALINOFF;
+ uint32_t saveDSPATILEOFF;
+ uint32_t saveDSPASIZE;
+ uint32_t saveDSPAPOS;
+ uint32_t saveDSPASURF;
+ uint32_t saveDSPACNTR;
+ uint32_t saveDSPASTATUS;
+ uint32_t save_palette_a[256];
+ uint32_t saveMIPI;
+
+ uint32_t saveDPLL_B;
+ uint32_t saveFPB0;
+ uint32_t savePIPEBCONF;
+ uint32_t saveHTOTAL_B;
+ uint32_t saveHBLANK_B;
+ uint32_t saveHSYNC_B;
+ uint32_t saveVTOTAL_B;
+ uint32_t saveVBLANK_B;
+ uint32_t saveVSYNC_B;
+ uint32_t savePIPEBSRC;
+ uint32_t saveDSPBSTRIDE;
+ uint32_t saveDSPBLINOFF;
+ uint32_t saveDSPBTILEOFF;
+ uint32_t saveDSPBSIZE;
+ uint32_t saveDSPBPOS;
+ uint32_t saveDSPBSURF;
+ uint32_t saveDSPBCNTR;
+ uint32_t saveDSPBSTATUS;
+ uint32_t save_palette_b[256];
+
+ uint32_t savePIPECCONF;
+ uint32_t saveHTOTAL_C;
+ uint32_t saveHBLANK_C;
+ uint32_t saveHSYNC_C;
+ uint32_t saveVTOTAL_C;
+ uint32_t saveVBLANK_C;
+ uint32_t saveVSYNC_C;
+ uint32_t savePIPECSRC;
+ uint32_t saveDSPCSTRIDE;
+ uint32_t saveDSPCLINOFF;
+ uint32_t saveDSPCTILEOFF;
+ uint32_t saveDSPCSIZE;
+ uint32_t saveDSPCPOS;
+ uint32_t saveDSPCSURF;
+ uint32_t saveDSPCCNTR;
+ uint32_t saveDSPCSTATUS;
+ uint32_t save_palette_c[256];
+ uint32_t saveMIPI_C;
+
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveHDMIPHYMISCCTL;
+ uint32_t saveHDMIB_CONTROL;
+};
+
+struct cdv_state {
+ uint32_t saveDSPCLK_GATE_D;
+ uint32_t saveRAMCLK_GATE_D;
+ uint32_t saveDSPARB;
+ uint32_t saveDSPFW[6];
+ uint32_t saveADPA;
+ uint32_t savePP_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveLVDS;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePP_ON_DELAYS;
+ uint32_t savePP_OFF_DELAYS;
+ uint32_t savePP_CYCLE;
+ uint32_t saveVGACNTRL;
+ uint32_t saveIER;
+ uint32_t saveIMR;
+ u8 saveLBB;
+};
+
+struct psb_save_area {
+ uint32_t saveBSM;
+ uint32_t saveVBT;
+ union {
+ struct psb_state psb;
+ struct medfield_state mdfld;
+ struct cdv_state cdv;
+ };
+ uint32_t saveBLC_PWM_CTL2;
+ uint32_t saveBLC_PWM_CTL;
+};
+
struct psb_ops;
#define PSB_NUM_PIPE 3
@@ -397,216 +608,21 @@ struct drm_psb_private {
struct oaktrail_vbt vbt_data;
struct oaktrail_gct_data gct_data;
- /* MIPI Panel type etc */
- int panel_id;
- bool dual_mipi; /* dual display - DPI & DBI */
- bool dpi_panel_on; /* The DPI panel power is on */
- bool dpi_panel_on2; /* The DPI panel power is on */
- bool dbi_panel_on; /* The DBI panel power is on */
- bool dbi_panel_on2; /* The DBI panel power is on */
- u32 dsr_fb_update; /* DSR FB update counter */
-
- /* Moorestown HDMI state */
+ /* Oaktrail HDMI state */
struct oaktrail_hdmi_dev *hdmi_priv;
-
- /* Moorestown pipe config register value cache */
- uint32_t pipeconf;
- uint32_t pipeconf1;
- uint32_t pipeconf2;
-
- /* Moorestown plane control register value cache */
- uint32_t dspcntr;
- uint32_t dspcntr1;
- uint32_t dspcntr2;
-
- /* Moorestown MM backlight cache */
- uint8_t saveBKLTCNT;
- uint8_t saveBKLTREQ;
- uint8_t saveBKLTBRTL;
-
+
/*
* Register state
*/
- uint32_t saveDSPACNTR;
- uint32_t saveDSPBCNTR;
- uint32_t savePIPEACONF;
- uint32_t savePIPEBCONF;
- uint32_t savePIPEASRC;
- uint32_t savePIPEBSRC;
- uint32_t saveFPA0;
- uint32_t saveFPA1;
- uint32_t saveDPLL_A;
- uint32_t saveDPLL_A_MD;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPABASE;
- uint32_t saveDSPASURF;
- uint32_t saveDSPASTATUS;
- uint32_t saveFPB0;
- uint32_t saveFPB1;
- uint32_t saveDPLL_B;
- uint32_t saveDPLL_B_MD;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBBASE;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBSTATUS;
- uint32_t saveVCLK_DIVISOR_VGA0;
- uint32_t saveVCLK_DIVISOR_VGA1;
- uint32_t saveVCLK_POST_DIV;
- uint32_t saveVGACNTRL;
- uint32_t saveADPA;
- uint32_t saveLVDS;
- uint32_t saveDVOA;
- uint32_t saveDVOB;
- uint32_t saveDVOC;
- uint32_t savePP_ON;
- uint32_t savePP_OFF;
- uint32_t savePP_CONTROL;
- uint32_t savePP_CYCLE;
- uint32_t savePFIT_CONTROL;
- uint32_t savePaletteA[256];
- uint32_t savePaletteB[256];
- uint32_t saveBLC_PWM_CTL2;
- uint32_t saveBLC_PWM_CTL;
- uint32_t saveCLOCKGATING;
- uint32_t saveDSPARB;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPAADDR;
- uint32_t saveDSPBADDR;
- uint32_t savePFIT_AUTO_RATIOS;
- uint32_t savePFIT_PGM_RATIOS;
- uint32_t savePP_ON_DELAYS;
- uint32_t savePP_OFF_DELAYS;
- uint32_t savePP_DIVISOR;
- uint32_t saveBSM;
- uint32_t saveVBT;
- uint32_t saveBCLRPAT_A;
- uint32_t saveBCLRPAT_B;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPBLINOFF;
- uint32_t savePERF_MODE;
- uint32_t saveDSPFW1;
- uint32_t saveDSPFW2;
- uint32_t saveDSPFW3;
- uint32_t saveDSPFW4;
- uint32_t saveDSPFW5;
- uint32_t saveDSPFW6;
- uint32_t saveCHICKENBIT;
- uint32_t saveDSPACURSOR_CTRL;
- uint32_t saveDSPBCURSOR_CTRL;
- uint32_t saveDSPACURSOR_BASE;
- uint32_t saveDSPBCURSOR_BASE;
- uint32_t saveDSPACURSOR_POS;
- uint32_t saveDSPBCURSOR_POS;
- uint32_t save_palette_a[256];
- uint32_t save_palette_b[256];
- uint32_t saveOV_OVADD;
- uint32_t saveOV_OGAMC0;
- uint32_t saveOV_OGAMC1;
- uint32_t saveOV_OGAMC2;
- uint32_t saveOV_OGAMC3;
- uint32_t saveOV_OGAMC4;
- uint32_t saveOV_OGAMC5;
- uint32_t saveOVC_OVADD;
- uint32_t saveOVC_OGAMC0;
- uint32_t saveOVC_OGAMC1;
- uint32_t saveOVC_OGAMC2;
- uint32_t saveOVC_OGAMC3;
- uint32_t saveOVC_OGAMC4;
- uint32_t saveOVC_OGAMC5;
+
+ struct psb_save_area regs;
/* MSI reg save */
uint32_t msi_addr;
uint32_t msi_data;
- /* Medfield specific register save state */
- uint32_t saveHDMIPHYMISCCTL;
- uint32_t saveHDMIB_CONTROL;
- uint32_t saveDSPCCNTR;
- uint32_t savePIPECCONF;
- uint32_t savePIPECSRC;
- uint32_t saveHTOTAL_C;
- uint32_t saveHBLANK_C;
- uint32_t saveHSYNC_C;
- uint32_t saveVTOTAL_C;
- uint32_t saveVBLANK_C;
- uint32_t saveVSYNC_C;
- uint32_t saveDSPCSTRIDE;
- uint32_t saveDSPCSIZE;
- uint32_t saveDSPCPOS;
- uint32_t saveDSPCSURF;
- uint32_t saveDSPCSTATUS;
- uint32_t saveDSPCLINOFF;
- uint32_t saveDSPCTILEOFF;
- uint32_t saveDSPCCURSOR_CTRL;
- uint32_t saveDSPCCURSOR_BASE;
- uint32_t saveDSPCCURSOR_POS;
- uint32_t save_palette_c[256];
- uint32_t saveOV_OVADD_C;
- uint32_t saveOV_OGAMC0_C;
- uint32_t saveOV_OGAMC1_C;
- uint32_t saveOV_OGAMC2_C;
- uint32_t saveOV_OGAMC3_C;
- uint32_t saveOV_OGAMC4_C;
- uint32_t saveOV_OGAMC5_C;
-
- /* DSI register save */
- uint32_t saveDEVICE_READY_REG;
- uint32_t saveINTR_EN_REG;
- uint32_t saveDSI_FUNC_PRG_REG;
- uint32_t saveHS_TX_TIMEOUT_REG;
- uint32_t saveLP_RX_TIMEOUT_REG;
- uint32_t saveTURN_AROUND_TIMEOUT_REG;
- uint32_t saveDEVICE_RESET_REG;
- uint32_t saveDPI_RESOLUTION_REG;
- uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
- uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
- uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
- uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
- uint32_t saveVERT_SYNC_PAD_COUNT_REG;
- uint32_t saveVERT_BACK_PORCH_COUNT_REG;
- uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
- uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
- uint32_t saveINIT_COUNT_REG;
- uint32_t saveMAX_RET_PAK_REG;
- uint32_t saveVIDEO_FMT_REG;
- uint32_t saveEOT_DISABLE_REG;
- uint32_t saveLP_BYTECLK_REG;
- uint32_t saveHS_LS_DBI_ENABLE_REG;
- uint32_t saveTXCLKESC_REG;
- uint32_t saveDPHY_PARAM_REG;
- uint32_t saveMIPI_CONTROL_REG;
- uint32_t saveMIPI;
- uint32_t saveMIPI_C;
-
- /* DPST register save */
- uint32_t saveHISTOGRAM_INT_CONTROL_REG;
- uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
- uint32_t savePWM_CONTROL_LOGIC;
/*
- * DSI info.
- */
- void * dbi_dsr_info;
- void * dbi_dpu_info;
- void * dsi_configs[2];
- /*
* LID-Switch
*/
spinlock_t lid_lock;
@@ -635,6 +651,24 @@ struct drm_psb_private {
/* 2D acceleration */
spinlock_t lock_2d;
+
+ /*
+ * Panel brightness
+ */
+ int brightness;
+ int brightness_adjusted;
+
+ bool dsr_enable;
+ u32 dsr_fb_update;
+ bool dpi_panel_on[3];
+ void *dsi_configs[2];
+ u32 bpp;
+ u32 bpp2;
+
+ u32 pipeconf[3];
+ u32 dspcntr[3];
+
+ int mdfld_panel_id;
};
@@ -830,6 +864,9 @@ extern const struct psb_ops psb_chip_ops;
/* oaktrail_device.c */
extern const struct psb_ops oaktrail_chip_ops;
+/* mdfld_device.c */
+extern const struct psb_ops mdfld_chip_ops;
+
/* cdv_device.c */
extern const struct psb_ops cdv_chip_ops;
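The block above replaces the flat per-register fields in drm_psb_private with struct psb_save_area: saveBSM, saveVBT and the BLC_PWM words are common to every chip, while the chip-specific caches sit in a union of psb_state, medfield_state and cdv_state, so only one family's layout occupies memory at a time. Per-chip code takes a pointer to its own member, as the save/restore hunks earlier in this patch do; a minimal sketch using field names from the hunk:

/* Sketch only: how the new save area is addressed from chip code. */
static void save_area_sketch(struct drm_psb_private *dev_priv)
{
	/* Pick the union member that matches the chip family. */
	struct psb_state *regs = &dev_priv->regs.psb;	/* or .mdfld / .cdv */

	/* Chip-specific registers are cached through that member... */
	regs->saveDSPARB = PSB_RVDC32(DSPARB);
	regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);

	/* ...while saveBSM/saveVBT and saveBLC_PWM_CTL live outside the
	 * union, so common code (power.c, the LVDS backlight paths) can
	 * reach them without knowing which chip is present. */
}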
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 49e9835..2616558 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -333,7 +333,7 @@ void psb_intel_wait_for_vblank(struct drm_device *dev)
mdelay(20);
}
-int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
@@ -433,7 +433,6 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
- bool enabled;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -518,8 +517,6 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
/*Set FIFO Watermarks*/
REG_WRITE(DSPARB, 0x3F3E);
}
@@ -611,8 +608,8 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
int refclk;
struct psb_intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
- bool ok, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false;
+ bool ok, is_sdvo = false;
+ bool is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -637,15 +634,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_SDVO:
is_sdvo = true;
break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
}
}
@@ -845,7 +836,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->save_palette_a[i] =
+ dev_priv->regs.psb.save_palette_a[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -1141,18 +1132,20 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
gma_power_end(dev);
} else {
dpll = (pipe == 0) ?
- dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+ dev_priv->regs.psb.saveDPLL_A :
+ dev_priv->regs.psb.saveDPLL_B;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = (pipe == 0) ?
- dev_priv->saveFPA0 :
- dev_priv->saveFPB0;
+ dev_priv->regs.psb.saveFPA0 :
+ dev_priv->regs.psb.saveFPB0;
else
fp = (pipe == 0) ?
- dev_priv->saveFPA1 :
- dev_priv->saveFPB1;
+ dev_priv->regs.psb.saveFPA1 :
+ dev_priv->regs.psb.saveFPB1;
- is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
+ LVDS_PORT_EN);
}
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
@@ -1218,13 +1211,17 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
gma_power_end(dev);
} else {
htot = (pipe == 0) ?
- dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ dev_priv->regs.psb.saveHTOTAL_A :
+ dev_priv->regs.psb.saveHTOTAL_B;
hsync = (pipe == 0) ?
- dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ dev_priv->regs.psb.saveHSYNC_A :
+ dev_priv->regs.psb.saveHSYNC_B;
vtot = (pipe == 0) ?
- dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ dev_priv->regs.psb.saveVTOTAL_A :
+ dev_priv->regs.psb.saveVTOTAL_B;
vsync = (pipe == 0) ?
- dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ dev_priv->regs.psb.saveVSYNC_A :
+ dev_priv->regs.psb.saveVSYNC_B;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -1419,13 +1416,6 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
return index_mask;
}
-
-void psb_intel_modeset_cleanup(struct drm_device *dev)
-{
- drm_mode_config_cleanup(dev);
-}
-
-
/* current intel driver doesn't take advantage of encoders
always give back the encoder for the connector
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0a43758..c83f5b5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -77,7 +77,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
ret = REG_READ(BLC_PWM_CTL);
gma_power_end(dev);
} else /* Powered off, use the saved value */
- ret = dev_priv->saveBLC_PWM_CTL;
+ ret = dev_priv->regs.saveBLC_PWM_CTL;
/* Top 15bits hold the frequency mask */
ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
@@ -86,7 +86,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
ret *= 2; /* Return a 16bit range as needed for setting */
if (ret == 0)
dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
- REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+ REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL);
return ret;
}
@@ -203,13 +203,13 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
REG_WRITE(BLC_PWM_CTL,
(blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
- dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
gma_power_end(dev);
} else {
- blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
~BACKLIGHT_DUTY_CYCLE_MASK;
- dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
}
}
@@ -283,7 +283,7 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
/*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+ dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index fcc0af0..e89d3a2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -177,6 +177,9 @@
#define LVDSPP_OFF 0x6120c
#define PP_CYCLE 0x61210
+#define PP_ON_DELAYS 0x61208 /* Cedartrail */
+#define PP_OFF_DELAYS 0x6120c /* Cedartrail */
+
#define PFIT_CONTROL 0x61230
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
@@ -1252,6 +1255,12 @@ No status bits are changed.
# define SB_BYTE_ENABLE_SHIFT 4
# define SB_BUSY (1 << 0)
+#define DSPCLK_GATE_D 0x6200
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6)
+
+#define RAMCLK_GATE_D 0x6210
/* 32-bit value read/written from the DPIO reg. */
#define SB_DATA 0x02104 /* cedarview */
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 88b4297..36330ca 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1301,7 +1301,7 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
return NULL;
}
-enum drm_connector_status
+static enum drm_connector_status
psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
@@ -2312,10 +2312,8 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->max_##name = data_value[0]; \
psb_intel_sdvo_connector->cur_##name = response; \
psb_intel_sdvo_connector->name = \
- drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
- psb_intel_sdvo_connector->name->values[0] = 0; \
- psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
drm_connector_attach_property(connector, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
@@ -2349,25 +2347,19 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
psb_intel_sdvo_connector->left_margin = data_value[0] - response;
psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
psb_intel_sdvo_connector->left =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
if (!psb_intel_sdvo_connector->left)
return false;
- psb_intel_sdvo_connector->left->values[0] = 0;
- psb_intel_sdvo_connector->left->values[1] = data_value[0];
drm_connector_attach_property(connector,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
psb_intel_sdvo_connector->right =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
if (!psb_intel_sdvo_connector->right)
return false;
- psb_intel_sdvo_connector->right->values[0] = 0;
- psb_intel_sdvo_connector->right->values[1] = data_value[0];
drm_connector_attach_property(connector,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
@@ -2391,25 +2383,19 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
psb_intel_sdvo_connector->top_margin = data_value[0] - response;
psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
psb_intel_sdvo_connector->top =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
+ drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
if (!psb_intel_sdvo_connector->top)
return false;
- psb_intel_sdvo_connector->top->values[0] = 0;
- psb_intel_sdvo_connector->top->values[1] = data_value[0];
drm_connector_attach_property(connector,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
psb_intel_sdvo_connector->bottom =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
+ drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
if (!psb_intel_sdvo_connector->bottom)
return false;
- psb_intel_sdvo_connector->bottom->values[0] = 0;
- psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
drm_connector_attach_property(connector,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
@@ -2438,12 +2424,10 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
psb_intel_sdvo_connector->max_dot_crawl = 1;
psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
psb_intel_sdvo_connector->dot_crawl =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
- psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
- psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
drm_connector_attach_property(connector,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
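The SDVO hunks above convert open-coded range properties - drm_property_create() with a value count of 2 followed by filling values[0] and values[1] by hand - to drm_property_create_range(), which takes the minimum and maximum directly. A before/after sketch of that conversion, using the dot_crawl bounds from the hunk:

/* Sketch only: old and new forms of creating the same range property. */
static void sdvo_range_prop_sketch(struct drm_device *dev)
{
	struct drm_property *prop;

	/* Old form: allocate a two-value range, then set the bounds. */
	prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
	prop->values[0] = 0;
	prop->values[1] = 1;

	/* New form: one call, flags 0, bounds passed directly. */
	prop = drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
}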
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 7be802b..1869586 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -27,6 +27,8 @@
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include "power.h"
+#include "psb_irq.h"
+#include "mdfld_output.h"
/*
* inline functions
@@ -113,7 +115,7 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
}
}
-void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
if (gma_power_begin(dev_priv->dev, false)) {
u32 pipe_event = mid_pipe_event(pipe);
@@ -124,7 +126,7 @@ void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
}
}
-void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
if (dev_priv->pipestat[pipe] == 0) {
if (gma_power_begin(dev_priv->dev, false)) {
@@ -453,6 +455,11 @@ int psb_enable_vblank(struct drm_device *dev, int pipe)
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
+ /* Medfield is different - we should perhaps extract out vblank
+ and backlight etc ops */
+ if (IS_MFLD(dev))
+ return mdfld_enable_te(dev, pipe);
+
if (gma_power_begin(dev, false)) {
reg_val = REG_READ(pipeconf_reg);
gma_power_end(dev);
@@ -485,6 +492,8 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ if (IS_MFLD(dev))
+ mdfld_disable_te(dev, pipe);
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
if (pipe == 0)
@@ -499,6 +508,55 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
+/*
+ * Enable the TE (tearing effect) interrupt
+ */
+int mdfld_enable_te(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+ uint32_t reg_val = 0;
+ uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+ if (gma_power_begin(dev, false)) {
+ reg_val = REG_READ(pipeconf_reg);
+ gma_power_end(dev);
+ }
+
+ if (!(reg_val & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_enable_pipe_event(dev_priv, pipe);
+ psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * Disable the TE (tearing effect) interrupt
+ */
+void mdfld_disable_te(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ if (!dev_priv->dsr_enable)
+ return;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_disable_pipe_event(dev_priv, pipe);
+ psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 216fda3..603045b 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -42,4 +42,6 @@ int psb_enable_vblank(struct drm_device *dev, int pipe);
void psb_disable_vblank(struct drm_device *dev, int pipe);
u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
+int mdfld_enable_te(struct drm_device *dev, int pipe);
+void mdfld_disable_te(struct drm_device *dev, int pipe);
#endif /* _SYSIRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
new file mode 100644
index 0000000..4a07ab5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "tc35876x-dsi-lvds.h"
+#include <linux/i2c/tc35876x.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct i2c_client *tc35876x_client;
+static struct i2c_client *cmi_lcd_i2c_client;
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX 0x0004
+#define CLW_DPHYCONTRX 0x0020
+#define D0W_DPHYCONTRX 0x0024
+#define D1W_DPHYCONTRX 0x0028
+#define D2W_DPHYCONTRX 0x002C
+#define D3W_DPHYCONTRX 0x0030
+#define COM_DPHYCONTRX 0x0038
+#define CLW_CNTRL 0x0040
+#define D0W_CNTRL 0x0044
+#define D1W_CNTRL 0x0048
+#define D2W_CNTRL 0x004C
+#define D3W_CNTRL 0x0050
+#define DFTMODE_CNTRL 0x0054
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI 0x0104
+#define PPI_BUSYPPI 0x0108
+#define PPI_LINEINITCNT 0x0110
+#define PPI_LPTXTIMECNT 0x0114
+#define PPI_LANEENABLE 0x0134
+#define PPI_TX_RX_TA 0x013C
+#define PPI_CLS_ATMR 0x0140
+#define PPI_D0S_ATMR 0x0144
+#define PPI_D1S_ATMR 0x0148
+#define PPI_D2S_ATMR 0x014C
+#define PPI_D3S_ATMR 0x0150
+#define PPI_D0S_CLRSIPOCOUNT 0x0164
+#define PPI_D1S_CLRSIPOCOUNT 0x0168
+#define PPI_D2S_CLRSIPOCOUNT 0x016C
+#define PPI_D3S_CLRSIPOCOUNT 0x0170
+#define CLS_PRE 0x0180
+#define D0S_PRE 0x0184
+#define D1S_PRE 0x0188
+#define D2S_PRE 0x018C
+#define D3S_PRE 0x0190
+#define CLS_PREP 0x01A0
+#define D0S_PREP 0x01A4
+#define D1S_PREP 0x01A8
+#define D2S_PREP 0x01AC
+#define D3S_PREP 0x01B0
+#define CLS_ZERO 0x01C0
+#define D0S_ZERO 0x01C4
+#define D1S_ZERO 0x01C8
+#define D2S_ZERO 0x01CC
+#define D3S_ZERO 0x01D0
+#define PPI_CLRFLG 0x01E0
+#define PPI_CLRSIPO 0x01E4
+#define HSTIMEOUT 0x01F0
+#define HSTIMEOUTENABLE 0x01F4
+
+/* DSI Protocol Layer Registers */
+#define DSI_STARTDSI 0x0204
+#define DSI_BUSYDSI 0x0208
+#define DSI_LANEENABLE 0x0210
+#define DSI_LANESTATUS0 0x0214
+#define DSI_LANESTATUS1 0x0218
+#define DSI_INTSTATUS 0x0220
+#define DSI_INTMASK 0x0224
+#define DSI_INTCLR 0x0228
+#define DSI_LPTXTO 0x0230
+
+/* DSI General Registers */
+#define DSIERRCNT 0x0300
+
+/* DSI Application Layer Registers */
+#define APLCTRL 0x0400
+#define RDPKTLN 0x0404
+
+/* Video Path Registers */
+#define VPCTRL 0x0450
+#define HTIM1 0x0454
+#define HTIM2 0x0458
+#define VTIM1 0x045C
+#define VTIM2 0x0460
+#define VFUEN 0x0464
+
+/* LVDS Registers */
+#define LVMX0003 0x0480
+#define LVMX0407 0x0484
+#define LVMX0811 0x0488
+#define LVMX1215 0x048C
+#define LVMX1619 0x0490
+#define LVMX2023 0x0494
+#define LVMX2427 0x0498
+#define LVCFG 0x049C
+#define LVPHY0 0x04A0
+#define LVPHY1 0x04A4
+
+/* System Registers */
+#define SYSSTAT 0x0500
+#define SYSRST 0x0504
+
+/* GPIO Registers */
+/*#define GPIOC 0x0520*/
+#define GPIOO 0x0524
+#define GPIOI 0x0528
+
+/* I2C Registers */
+#define I2CTIMCTRL 0x0540
+#define I2CMADDR 0x0544
+#define WDATAQ 0x0548
+#define RDATAQ 0x054C
+
+/* Chip/Rev Registers */
+#define IDREG 0x0580
+
+/* Debug Registers */
+#define DEBUG00 0x05A0
+#define DEBUG01 0x05A4
+
+/* Panel CABC registers */
+#define PANEL_PWM_CONTROL 0x90
+#define PANEL_FREQ_DIVIDER_HI 0x91
+#define PANEL_FREQ_DIVIDER_LO 0x92
+#define PANEL_DUTY_CONTROL 0x93
+#define PANEL_MODIFY_RGB 0x94
+#define PANEL_FRAMERATE_CONTROL 0x96
+#define PANEL_PWM_MIN 0x97
+#define PANEL_PWM_REF 0x98
+#define PANEL_PWM_MAX 0x99
+#define PANEL_ALLOW_DISTORT 0x9A
+#define PANEL_BYPASS_PWMI 0x9B
+
+/* Panel color management registers */
+#define PANEL_CM_ENABLE 0x700
+#define PANEL_CM_HUE 0x701
+#define PANEL_CM_SATURATION 0x702
+#define PANEL_CM_INTENSITY 0x703
+#define PANEL_CM_BRIGHTNESS 0x704
+#define PANEL_CM_CE_ENABLE 0x705
+#define PANEL_CM_PEAK_EN 0x710
+#define PANEL_CM_GAIN 0x711
+#define PANEL_CM_HUETABLE_START 0x730
+#define PANEL_CM_HUETABLE_END 0x747 /* inclusive */
+
+/* Input muxing for registers LVMX0003...LVMX2427 */
+enum {
+ INPUT_R0, /* 0 */
+ INPUT_R1,
+ INPUT_R2,
+ INPUT_R3,
+ INPUT_R4,
+ INPUT_R5,
+ INPUT_R6,
+ INPUT_R7,
+ INPUT_G0, /* 8 */
+ INPUT_G1,
+ INPUT_G2,
+ INPUT_G3,
+ INPUT_G4,
+ INPUT_G5,
+ INPUT_G6,
+ INPUT_G7,
+ INPUT_B0, /* 16 */
+ INPUT_B1,
+ INPUT_B2,
+ INPUT_B3,
+ INPUT_B4,
+ INPUT_B5,
+ INPUT_B6,
+ INPUT_B7,
+ INPUT_HSYNC, /* 24 */
+ INPUT_VSYNC,
+ INPUT_DE,
+ LOGIC_0,
+ /* 28...31 undefined */
+};
+
+#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \
+ (FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \
+ FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
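For illustration, the first LVDS mux write further down, tc35876x_regw(i2c, LVMX0003, INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2)), expands as follows with the enum values above (INPUT_R2 = 2 ... INPUT_R5 = 5):

    /* FLD_VAL(INPUT_R5, 29, 24) | FLD_VAL(INPUT_R4, 20, 16) |
     * FLD_VAL(INPUT_R3, 12, 8)  | FLD_VAL(INPUT_R2, 4, 0)
     *   == 0x05000000 | 0x00040000 | 0x00000300 | 0x00000002
     *   == 0x05040302
     */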
+
+/**
+ * tc35876x_regw - Write DSI-LVDS bridge register using I2C
+ * @client: struct i2c_client to use
+ * @reg: register address
+ * @value: value to write
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
+{
+ int r;
+ u8 tx_data[] = {
+ /* NOTE: Register address big-endian, data little-endian. */
+ (reg >> 8) & 0xff,
+ reg & 0xff,
+ value & 0xff,
+ (value >> 8) & 0xff,
+ (value >> 16) & 0xff,
+ (value >> 24) & 0xff,
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = tx_data,
+ .len = ARRAY_SIZE(tx_data),
+ },
+ };
+
+ r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (r < 0) {
+ dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
+ __func__, reg, value, r);
+ return r;
+ }
+
+ if (r < ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
+ __func__, reg, value, r);
+ return -EAGAIN;
+ }
+
+ dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
+ __func__, reg, value);
+
+ return 0;
+}
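Given the byte ordering noted in the comment above (register address big-endian, data little-endian), a call such as tc35876x_regw(i2c, LVCFG, 0x00000001) puts a 6-byte payload on the bus; a sketch of that buffer, following the tx_data layout in the function:

    /* tx_data for reg 0x049c (LVCFG), value 0x00000001 */
    u8 example_tx[6] = { 0x04, 0x9c, 0x01, 0x00, 0x00, 0x00 };
    /* [0..1]: register address, MSB first; [2..5]: value, LSB first */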
+
+/**
+ * tc35876x_regr - Read DSI-LVDS bridge register using I2C
+ * @client: struct i2c_client to use
+ * @reg: register address
+ * @value: pointer for storing the value
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
+{
+ int r;
+ u8 tx_data[] = {
+ (reg >> 8) & 0xff,
+ reg & 0xff,
+ };
+ u8 rx_data[4];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = tx_data,
+ .len = ARRAY_SIZE(tx_data),
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = rx_data,
+ .len = ARRAY_SIZE(rx_data),
+ },
+ };
+
+ r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (r < 0) {
+ dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
+ reg, r);
+ return r;
+ }
+
+ if (r < ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
+ reg, r);
+ return -EAGAIN;
+ }
+
+ *value = rx_data[0] << 24 | rx_data[1] << 16 |
+ rx_data[2] << 8 | rx_data[3];
+
+ dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
+ reg, *value);
+
+ return 0;
+}
+
+void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
+{
+ struct tc35876x_platform_data *pdata;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
+
+ pdata = dev_get_platdata(&tc35876x_client->dev);
+
+ if (pdata->gpio_bridge_reset == -1)
+ return;
+
+ if (state) {
+ gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ mdelay(10);
+ } else {
+ /* Pull MIPI Bridge reset pin to Low */
+ gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+ mdelay(20);
+ /* Pull MIPI Bridge reset pin to High */
+ gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
+ mdelay(40);
+ }
+}
+
+void tc35876x_configure_lvds_bridge(struct drm_device *dev)
+{
+ struct i2c_client *i2c = tc35876x_client;
+ u32 ppi_lptxtimecnt;
+ u32 txtagocnt;
+ u32 txtasurecnt;
+ u32 id;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+ if (!tc35876x_regr(i2c, IDREG, &id))
+ dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
+ else
+ dev_err(&tc35876x_client->dev, "Cannot read ID\n");
+
+ ppi_lptxtimecnt = 4;
+ txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
+ txtasurecnt = 3 * ppi_lptxtimecnt / 2;
+ tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
+ FLD_VAL(txtasurecnt, 10, 0));
+ tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
+
+ tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+ tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+ tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+ tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+
+	/* Enable the MIPI and PPI lanes; all four data lanes are used. */
+ tc35876x_regw(i2c, PPI_LANEENABLE,
+ BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+ tc35876x_regw(i2c, DSI_LANEENABLE,
+ BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+ tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
+ tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
+
+ /* Setting LVDS output frequency */
+ tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
+ FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
+
+	/* Set the video path control register (VPCTRL) to 0x00000120 with
+	 * VTGen=ON; the reason for this particular value is unclear. */
+ tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
+
+ /* Horizontal back porch and horizontal pulse width. 0x00280028 */
+ tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
+
+	/* Horizontal front porch and horizontal active video size. 0x00500500 */
+ tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
+
+ /* Vertical back porch and vertical sync pulse width. 0x000e000a */
+ tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
+
+ /* Vertical front porch and vertical display size. 0x000e0320 */
+ tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
+
+ /* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
+ tc35876x_regw(i2c, VFUEN, BIT(0));
+
+ /* Soft reset LCD controller. */
+ tc35876x_regw(i2c, SYSRST, BIT(2));
+
+ /* LVDS-TX input muxing */
+ tc35876x_regw(i2c, LVMX0003,
+ INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
+ tc35876x_regw(i2c, LVMX0407,
+ INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
+ tc35876x_regw(i2c, LVMX0811,
+ INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
+ tc35876x_regw(i2c, LVMX1215,
+ INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
+ tc35876x_regw(i2c, LVMX1619,
+ INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
+ tc35876x_regw(i2c, LVMX2023,
+ INPUT_MUX(LOGIC_0, INPUT_B7, INPUT_B6, INPUT_B5));
+ tc35876x_regw(i2c, LVMX2427,
+ INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
+
+ /* Enable LVDS transmitter. */
+ tc35876x_regw(i2c, LVCFG, BIT(0));
+
+	/* Clear notifications without writing the reserved bits. (The previous
+	 * code wrote 0xffffffff to 0x0288, which appears to have been an error.) */
+ tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
+}
+
+#define GPIOPWMCTRL 0x38F
+#define PWM0CLKDIV0 0x62 /* low byte */
+#define PWM0CLKDIV1 0x61 /* high byte */
+
+#define SYSTEMCLK 19200000UL /* 19.2 MHz */
+#define PWM_FREQUENCY 9600 /* Hz */
+
+/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
+static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
+{
+ return (baseclk - f) / f;
+}
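With the constants above, the arithmetic works out as follows; a worked example of the formula in the comment:

    /* clkdiv = (19200000 - 9600) / 9600 = 1999 = 0x07cf            */
    /* check:  f = 19200000 / (1999 + 1) = 9600 Hz                  */
    /* so PWM0CLKDIV1 is written 0x07 and PWM0CLKDIV0 is written 0xcf */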
+
+static void tc35876x_brightness_init(struct drm_device *dev)
+{
+ int ret;
+ u8 pwmctrl;
+ u16 clkdiv;
+
+ /* Make sure the PWM reference is the 19.2 MHz system clock. Read first
+ * instead of setting directly to catch potential conflicts between PWM
+ * users. */
+ ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
+ if (ret || pwmctrl != 0x01) {
+ if (ret)
+ dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
+ else
+ dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
+
+ ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
+ if (ret)
+ dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
+ }
+
+ clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
+
+ ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
+ if (!ret)
+ ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
+
+ if (ret)
+ dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
+ else
+ dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
+ clkdiv, PWM_FREQUENCY);
+}
+
+#define PWM0DUTYCYCLE 0x67
+
+void tc35876x_brightness_control(struct drm_device *dev, int level)
+{
+ int ret;
+ u8 duty_val;
+ u8 panel_duty_val;
+
+ level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+ /* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
+ duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
+
+	/* This formula comes from the panel spec; its derivation is unclear
+	 * because the spec is poorly worded.
+	 */
+ panel_duty_val = (2 * level - 100) * 0xA9 /
+ MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
+
+ ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
+ if (ret)
+ dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
+ __func__);
+
+ if (cmi_lcd_i2c_client) {
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_PWM_MAX, panel_duty_val);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
+ __func__);
+ }
+}
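To see what the two duty-cycle formulas produce, here is a small sketch of the endpoints; it assumes MDFLD_DSI_BRIGHTNESS_MAX_LEVEL is 100, which is defined elsewhere in the driver, so treat that value as an assumption for illustration:

    /* Assuming MDFLD_DSI_BRIGHTNESS_MAX_LEVEL == 100 (assumption):      */
    /* level = 100: duty_val       = 100 * 0x63 / 100          = 0x63    */
    /*              panel_duty_val = 100 * 0xA9 / 100 + 0x56   = 0xff    */
    /* level =  50: duty_val       =  50 * 0x63 / 100          = 0x31    */
    /*              panel_duty_val =   0 * 0xA9 / 100 + 0x56   = 0x56    */
    /* For level < 50 the signed intermediate (2 * level - 100) goes
     * negative, so the result wraps when stored in the u8 panel_duty_val. */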
+
+void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
+{
+ struct tc35876x_platform_data *pdata;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+ pdata = dev_get_platdata(&tc35876x_client->dev);
+
+ if (pdata->gpio_panel_bl_en != -1)
+ gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
+
+ if (pdata->gpio_panel_vadd != -1)
+ gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
+}
+
+void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
+{
+ struct tc35876x_platform_data *pdata;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (WARN(!tc35876x_client, "%s called before probe", __func__))
+ return;
+
+ dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+ pdata = dev_get_platdata(&tc35876x_client->dev);
+
+ if (pdata->gpio_panel_vadd != -1) {
+ gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
+ msleep(260);
+ }
+
+ if (cmi_lcd_i2c_client) {
+ int ret;
+ dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
+ /* Bit 4 is average_saving. Setting it to 1, the brightness is
+ * referenced to the average of the frame content. 0 means
+ * reference to the maximum of frame contents. Bits 3:0 are
+ * allow_distort. When set to a nonzero value, all color values
+ * between 255-allow_distort*2 and 255 are mapped to the
+ * 255-allow_distort*2 value.
+ */
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_ALLOW_DISTORT, 0x10);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev,
+ "i2c write failed (%d)\n", ret);
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_BYPASS_PWMI, 0);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev,
+ "i2c write failed (%d)\n", ret);
+ /* Set minimum brightness value - this is tunable */
+ ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+ PANEL_PWM_MIN, 0x35);
+ if (ret < 0)
+ dev_err(&cmi_lcd_i2c_client->dev,
+ "i2c write failed (%d)\n", ret);
+ }
+
+ if (pdata->gpio_panel_bl_en != -1)
+ gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
+
+ tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
+}
+
+static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+
+ dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ /* FIXME: do this properly. */
+ mode->hdisplay = 1280;
+ mode->vdisplay = 800;
+ mode->hsync_start = 1360;
+ mode->hsync_end = 1400;
+ mode->htotal = 1440;
+ mode->vsync_start = 814;
+ mode->vsync_end = 824;
+ mode->vtotal = 838;
+ mode->clock = 33324 << 1;
+
+ dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
+ dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
+ dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
+ dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
+ dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
+ dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
+ dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
+ dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
+ dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
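The hard-coded mode above lines up with the HTIM1/HTIM2/VTIM1/VTIM2 values programmed in tc35876x_configure_lvds_bridge(); the porch and sync numbers can be derived directly from it, as sketched here:

    /* horizontal: front porch = 1360 - 1280 = 80   (HTIM2)
     *             sync width  = 1400 - 1360 = 40   (HTIM1)
     *             back porch  = 1440 - 1400 = 40   (HTIM1)
     * vertical:   front porch =  814 -  800 = 14   (VTIM2)
     *             sync width  =  824 -  814 = 10   (VTIM1)
     *             back porch  =  838 -  824 = 14   (VTIM1)
     * refresh    ~= 66648000 / (1440 * 838) ~= 55 Hz
     */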
+
+/* DV1 Active area 216.96 x 135.6 mm */
+#define DV1_PANEL_WIDTH 217
+#define DV1_PANEL_HEIGHT 136
+
+static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = DV1_PANEL_WIDTH;
+ pi->height_mm = DV1_PANEL_HEIGHT;
+
+ return 0;
+}
+
+static int tc35876x_bridge_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc35876x_platform_data *pdata;
+
+ dev_info(&client->dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata) {
+ dev_err(&client->dev, "%s: no platform data\n", __func__);
+ return -ENODEV;
+ }
+
+ if (pdata->gpio_bridge_reset != -1) {
+ gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
+ gpio_direction_output(pdata->gpio_bridge_reset, 0);
+ }
+
+ if (pdata->gpio_panel_bl_en != -1) {
+ gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
+ gpio_direction_output(pdata->gpio_panel_bl_en, 0);
+ }
+
+ if (pdata->gpio_panel_vadd != -1) {
+ gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
+ gpio_direction_output(pdata->gpio_panel_vadd, 0);
+ }
+
+ tc35876x_client = client;
+
+ return 0;
+}
+
+static int tc35876x_bridge_remove(struct i2c_client *client)
+{
+ struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ if (pdata->gpio_bridge_reset != -1)
+ gpio_free(pdata->gpio_bridge_reset);
+
+ if (pdata->gpio_panel_bl_en != -1)
+ gpio_free(pdata->gpio_panel_bl_en);
+
+ if (pdata->gpio_panel_vadd != -1)
+ gpio_free(pdata->gpio_panel_vadd);
+
+ tc35876x_client = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id tc35876x_bridge_id[] = {
+ { "i2c_disp_brig", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
+
+static struct i2c_driver tc35876x_bridge_i2c_driver = {
+ .driver = {
+ .name = "i2c_disp_brig",
+ },
+ .id_table = tc35876x_bridge_id,
+ .probe = tc35876x_bridge_probe,
+ .remove = __devexit_p(tc35876x_bridge_remove),
+};
+
+/* LCD panel I2C */
+static int cmi_lcd_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ dev_info(&client->dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ cmi_lcd_i2c_client = client;
+
+ return 0;
+}
+
+static int cmi_lcd_i2c_remove(struct i2c_client *client)
+{
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ cmi_lcd_i2c_client = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id cmi_lcd_i2c_id[] = {
+ { "cmi-lcd", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
+
+static struct i2c_driver cmi_lcd_i2c_driver = {
+ .driver = {
+ .name = "cmi-lcd",
+ },
+ .id_table = cmi_lcd_i2c_id,
+ .probe = cmi_lcd_i2c_probe,
+ .remove = __devexit_p(cmi_lcd_i2c_remove),
+};
+
+/* HACK: create the I2C device here until platform code creates it itself */
+#define CMI_LCD_I2C_ADAPTER 2
+#define CMI_LCD_I2C_ADDR 0x60
+
+static int cmi_lcd_hack_create_device(void)
+{
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info = {
+ .type = "cmi-lcd",
+ .addr = CMI_LCD_I2C_ADDR,
+ };
+
+ pr_debug("%s\n", __func__);
+
+ adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
+ if (!adapter) {
+ pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
+ CMI_LCD_I2C_ADAPTER);
+ return -EINVAL;
+ }
+
+ client = i2c_new_device(adapter, &info);
+ if (!client) {
+ pr_err("%s: i2c_new_device() failed\n", __func__);
+ i2c_put_adapter(adapter);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tc35876x_funcs = {
+ .encoder_funcs = &tc35876x_encoder_funcs,
+ .encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
+ .get_config_mode = tc35876x_get_config_mode,
+ .get_panel_info = tc35876x_get_panel_info,
+};
+
+void tc35876x_init(struct drm_device *dev)
+{
+ int r;
+
+ dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+
+ cmi_lcd_hack_create_device();
+
+ r = i2c_add_driver(&cmi_lcd_i2c_driver);
+ if (r < 0)
+ dev_err(&dev->pdev->dev,
+ "%s: i2c_add_driver() for %s failed (%d)\n",
+ __func__, cmi_lcd_i2c_driver.driver.name, r);
+
+ r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
+ if (r < 0)
+ dev_err(&dev->pdev->dev,
+ "%s: i2c_add_driver() for %s failed (%d)\n",
+ __func__, tc35876x_bridge_i2c_driver.driver.name, r);
+
+ tc35876x_brightness_init(dev);
+}
+
+void tc35876x_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ i2c_del_driver(&tc35876x_bridge_i2c_driver);
+
+ if (cmi_lcd_i2c_client)
+ i2c_del_driver(&cmi_lcd_i2c_driver);
+}
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
new file mode 100644
index 0000000..b14b7f9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
+#define __MDFLD_DSI_LVDS_BRIDGE_H__
+
+void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
+void tc35876x_configure_lvds_bridge(struct drm_device *dev);
+void tc35876x_brightness_control(struct drm_device *dev, int level);
+void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
+void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
+void tc35876x_init(struct drm_device *dev);
+void tc35876x_exit(void);
+
+extern const struct panel_funcs mdfld_tc35876x_funcs;
+
+#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 07d55df..d3f2e87 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -252,10 +252,7 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
- priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "scale", 2);
- priv->scale_property->values[0] = 0;
- priv->scale_property->values[1] = 2;
+ priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
priv->select_subconnector);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 7f4b4e1..2c8a60c 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -99,7 +99,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
buf_priv = buf->dev_private;
vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
@@ -1208,6 +1207,8 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
+ pci_set_master(dev->pdev);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 808b255..ce7fc77 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
i915_suspend.o \
i915_gem.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index deaa657..fdb7cce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -83,6 +83,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(supports_tv);
B(has_bsd_ring);
B(has_blt_ring);
+ B(has_llc);
#undef B
return 0;
@@ -563,45 +564,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_dump_object(struct seq_file *m,
- struct io_mapping *mapping,
- struct drm_i915_gem_object *obj)
-{
- int page, page_count, i;
-
- page_count = obj->base.size / PAGE_SIZE;
- for (page = 0; page < page_count; page++) {
- u32 *mem = io_mapping_map_wc(mapping,
- obj->gtt_offset + page * PAGE_SIZE);
- for (i = 0; i < PAGE_SIZE; i += 4)
- seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- io_mapping_unmap(mem);
- }
-}
-
-static int i915_batchbuffer_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -668,9 +630,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
static const char *ring_str(int ring)
{
switch (ring) {
- case RING_RENDER: return " render";
- case RING_BSD: return " bsd";
- case RING_BLT: return " blt";
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
default: return "";
}
}
@@ -713,7 +675,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
@@ -723,6 +685,7 @@ static void print_error_buffers(struct seq_file *m,
tiling_flag(err->tiling),
dirty_flag(err->dirty),
purgeable_flag(err->purgeable),
+ err->ring != -1 ? " " : "",
ring_str(err->ring),
cache_level_str(err->cache_level));
@@ -736,6 +699,38 @@ static void print_error_buffers(struct seq_file *m,
}
}
+static void i915_ring_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ unsigned ring)
+{
+ seq_printf(m, "%s command stream:\n", ring_str(ring));
+ seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ seq_printf(m, " SYNC_0: 0x%08x\n",
+ error->semaphore_mboxes[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x\n",
+ error->semaphore_mboxes[ring][1]);
+ }
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -743,7 +738,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
- int i, page, offset, elt;
+ int i, j, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@@ -758,35 +753,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
- seq_printf(m, "Blitter command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
- seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
- seq_printf(m, "Video (BSD) command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
- seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
- }
- seq_printf(m, "Render command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
+ seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
- seq_printf(m, " seqno: 0x%08x\n", error->seqno);
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+ i915_ring_error_state(m, dev, error, RCS);
+ if (HAS_BLT(dev))
+ i915_ring_error_state(m, dev, error, BCS);
+ if (HAS_BSD(dev))
+ i915_ring_error_state(m, dev, error, VCS);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -798,10 +778,10 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->pinned_bo,
error->pinned_bo_count);
- for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
- if (error->batchbuffer[i]) {
- struct drm_i915_error_object *obj = error->batchbuffer[i];
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+ if ((obj = error->ring[i].batchbuffer)) {
seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -813,11 +793,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
}
- }
- for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
- if (error->ringbuffer[i]) {
- struct drm_i915_error_object *obj = error->ringbuffer[i];
+ if (error->ring[i].num_requests) {
+ seq_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -1414,9 +1403,108 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
return 0;
}
+static const char *swizzle_string(unsigned swizzle)
+{
+ switch(swizzle) {
+ case I915_BIT_6_SWIZZLE_NONE:
+ return "none";
+ case I915_BIT_6_SWIZZLE_9:
+ return "bit9";
+ case I915_BIT_6_SWIZZLE_9_10:
+ return "bit9/bit10";
+ case I915_BIT_6_SWIZZLE_9_11:
+ return "bit9/bit11";
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ return "bit9/bit10/bit11";
+ case I915_BIT_6_SWIZZLE_9_17:
+ return "bit9/bit17";
+ case I915_BIT_6_SWIZZLE_9_10_17:
+ return "bit9/bit10/bit17";
+ case I915_BIT_6_SWIZZLE_UNKNOWN:
+		return "unknown";
+ }
+
+ return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+ if (IS_GEN3(dev) || IS_GEN4(dev)) {
+ seq_printf(m, "DDC = 0x%08x\n",
+ I915_READ(DCC));
+ seq_printf(m, "C0DRB3 = 0x%04x\n",
+ I915_READ16(C0DRB3));
+ seq_printf(m, "C1DRB3 = 0x%04x\n",
+ I915_READ16(C1DRB3));
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C0));
+ seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C1));
+ seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C2));
+ seq_printf(m, "TILECTL = 0x%08x\n",
+ I915_READ(TILECTL));
+ seq_printf(m, "ARB_MODE = 0x%08x\n",
+ I915_READ(ARB_MODE));
+ seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+ I915_READ(DISP_ARB_CTL));
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i, ret;
+
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ if (INTEL_INFO(dev)->gen == 6)
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ring = &dev_priv->ring[i];
+
+ seq_printf(m, "%s\n", ring->name);
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ }
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ seq_printf(m, "aliasing PPGTT:\n");
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ }
+ seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int
-i915_wedged_open(struct inode *inode,
- struct file *filp)
+i915_debugfs_common_open(struct inode *inode,
+ struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
@@ -1472,20 +1560,12 @@ i915_wedged_write(struct file *filp,
static const struct file_operations i915_wedged_fops = {
.owner = THIS_MODULE,
- .open = i915_wedged_open,
+ .open = i915_debugfs_common_open,
.read = i915_wedged_read,
.write = i915_wedged_write,
.llseek = default_llseek,
};
-static int
-i915_max_freq_open(struct inode *inode,
- struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
@@ -1542,20 +1622,12 @@ i915_max_freq_write(struct file *filp,
static const struct file_operations i915_max_freq_fops = {
.owner = THIS_MODULE,
- .open = i915_max_freq_open,
+ .open = i915_debugfs_common_open,
.read = i915_max_freq_read,
.write = i915_max_freq_write,
.llseek = default_llseek,
};
-static int
-i915_cache_sharing_open(struct inode *inode,
- struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
i915_cache_sharing_read(struct file *filp,
char __user *ubuf,
@@ -1621,7 +1693,7 @@ i915_cache_sharing_write(struct file *filp,
static const struct file_operations i915_cache_sharing_fops = {
.owner = THIS_MODULE,
- .open = i915_cache_sharing_open,
+ .open = i915_debugfs_common_open,
.read = i915_cache_sharing_read,
.write = i915_cache_sharing_write,
.llseek = default_llseek,
@@ -1653,21 +1725,6 @@ drm_add_fake_info_node(struct drm_minor *minor,
return 0;
}
-static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file("i915_wedged",
- S_IRUGO | S_IWUSR,
- root, dev,
- &i915_wedged_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
-}
-
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -1729,34 +1786,22 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
-static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file("i915_max_freq",
- S_IRUGO | S_IWUSR,
- root, dev,
- &i915_max_freq_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
-}
-
-static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+static int i915_debugfs_create(struct dentry *root,
+ struct drm_minor *minor,
+ const char *name,
+ const struct file_operations *fops)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
- ent = debugfs_create_file("i915_cache_sharing",
+ ent = debugfs_create_file(name,
S_IRUGO | S_IWUSR,
root, dev,
- &i915_cache_sharing_fops);
+ fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
- return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+ return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
@@ -1782,7 +1827,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
@@ -1798,6 +1842,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_swizzle_info", i915_swizzle_info, 0},
+ {"i915_ppgtt_info", i915_ppgtt_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1805,17 +1851,25 @@ int i915_debugfs_init(struct drm_minor *minor)
{
int ret;
- ret = i915_wedged_create(minor->debugfs_root, minor);
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_wedged",
+ &i915_wedged_fops);
if (ret)
return ret;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
- ret = i915_max_freq_create(minor->debugfs_root, minor);
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_max_freq",
+ &i915_max_freq_fops);
if (ret)
return ret;
- ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_cache_sharing",
+ &i915_cache_sharing_fops);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ddfe3d9..9341eb8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -784,6 +784,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_GEN7_SOL_RESET:
value = 1;
break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1193,22 +1196,39 @@ static int i915_load_gem_init(struct drm_device *dev)
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently
- * prefetches past the end of the object, and we've seen multiple
- * hangs with the GPU head pointer stuck in a batchbuffer bound
- * at the last page of the aperture. One page should be enough to
- * keep any prefetching inside of the aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
-
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_init_ringbuffer(dev);
+ if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ /* For paranoia keep the guard page in between. */
+ gtt_size -= PAGE_SIZE;
+
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret)
+ return ret;
+ } else {
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+ }
+
+ ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
- if (ret)
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
return ret;
+ }
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
@@ -1295,6 +1315,7 @@ cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1930,6 +1951,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ pci_set_master(dev->pdev);
+
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
@@ -2129,7 +2152,7 @@ int i915_driver_unload(struct drm_device *dev)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -2182,6 +2205,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
@@ -2247,18 +2271,12 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_gem_lastclose(dev);
- if (dev_priv->agp_heap)
- i915_mem_takedown(&(dev_priv->agp_heap));
-
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
i915_gem_release(dev, file_priv);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -2277,11 +2295,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 308f819..0694e17 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -103,6 +103,11 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
+bool i915_enable_ppgtt __read_mostly = 1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+MODULE_PARM_DESC(i915_enable_ppgtt,
+ "Enable PPGTT (default: true)");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -198,7 +203,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
- .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
};
@@ -214,6 +219,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -222,6 +228,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -229,6 +236,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -237,6 +245,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -376,16 +385,27 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(FORCEWAKE);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
- POSTING_READ(FORCEWAKE_MT);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
/*
@@ -401,8 +421,10 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
+ int ret = 0;
+
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -410,10 +432,13 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
udelay(10);
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
- WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
dev_priv->gt_fifo_count = fifo;
}
dev_priv->gt_fifo_count--;
+
+ return ret;
}
static int i915_drm_freeze(struct drm_device *dev)
@@ -494,7 +519,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- error = i915_gem_init_ringbuffer(dev);
+ error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (HAS_PCH_SPLIT(dev))
@@ -633,7 +658,7 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
}
/**
- * i965_reset - reset chip after a hang
+ * i915_reset - reset chip after a hang
* @dev: drm device to reset
* @flags: reset domains
*
@@ -709,12 +734,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
!dev_priv->mm.suspended) {
dev_priv->mm.suspended = 0;
+ i915_gem_init_swizzling(dev);
+
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ i915_gem_init_ppgtt(dev);
+
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
@@ -977,11 +1006,15 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __gen6_gt_wait_for_fifo(dev_priv); \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
}
__i915_write(8, b)
__i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9689ca3..c0f19f5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,6 +35,7 @@
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
@@ -135,6 +136,7 @@ struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
+ int pin_count;
};
struct sdvo_device_mapping {
@@ -152,33 +154,40 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 pipestat[I915_MAX_PIPES];
- u32 ipeir;
- u32 ipehr;
- u32 instdone;
- u32 acthd;
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
+ u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head[I915_NUM_RINGS];
+ u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
- u32 bcs_acthd; /* gen6+ blt engine */
- u32 bcs_ipehr;
- u32 bcs_ipeir;
- u32 bcs_instdone;
- u32 bcs_seqno;
- u32 vcs_acthd; /* gen6+ bsd engine */
- u32 vcs_ipehr;
- u32 vcs_ipeir;
- u32 vcs_instdone;
- u32 vcs_seqno;
- u32 instpm;
- u32 instps;
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
u32 instdone1;
- u32 seqno;
+ u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
- struct drm_i915_error_object {
- int page_count;
- u32 gtt_offset;
- u32 *pages[0];
- } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
+ struct drm_i915_error_ring {
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer;
+ struct drm_i915_error_request {
+ long jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+ int num_requests;
+ } ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -191,7 +200,7 @@ struct drm_i915_error_state {
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- u32 ring:4;
+ s32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
@@ -255,6 +264,17 @@ struct intel_device_info {
u8 supports_tv:1;
u8 has_bsd_ring:1;
u8 has_blt_ring:1;
+ u8 has_llc:1;
+};
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
+struct i915_hw_ppgtt {
+ unsigned num_pd_entries;
+ struct page **pt_pages;
+ uint32_t pd_offset;
+ dma_addr_t *pt_dma_addr;
+ dma_addr_t scratch_page_dma_addr;
};
enum no_fbc_reason {
@@ -279,6 +299,16 @@ enum intel_pch {
struct intel_fbdev;
struct intel_fbc_work;
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ bool force_bit;
+ bool has_gpio;
+ u32 reg0;
+ u32 gpio_reg;
+ struct i2c_algo_bit_data bit_algo;
+ struct drm_i915_private *dev_priv;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -296,11 +326,11 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
- struct intel_gmbus {
- struct i2c_adapter adapter;
- struct i2c_adapter *force_bit;
- u32 reg0;
- } *gmbus;
+ struct intel_gmbus *gmbus;
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
@@ -335,7 +365,6 @@ typedef struct drm_i915_private {
int tex_lru_log_granularity;
int allow_batchbuffer;
- struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
@@ -584,6 +613,9 @@ typedef struct drm_i915_private {
struct io_mapping *gtt_mapping;
int gtt_mtrr;
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
struct shrinker inactive_shrinker;
/**
@@ -749,6 +781,13 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
enum i915_cache_level {
I915_CACHE_NONE,
I915_CACHE_LLC,
@@ -841,6 +880,8 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
+ unsigned int has_aliasing_ppgtt_mapping:1;
+
struct page **pages;
/**
@@ -918,6 +959,9 @@ struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
+ /** Postion in the ringbuffer of the end of the request */
+ u32 tail;
+
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@@ -974,8 +1018,11 @@ struct drm_i915_file_private {
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
+
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1018,6 +1065,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
+extern bool i915_enable_ppgtt __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1079,18 +1127,6 @@ extern void i915_destroy_error_state(struct drm_device *dev);
#endif
-/* i915_mem.c */
-extern int i915_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_init_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(struct drm_device * dev,
- struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1170,37 +1206,55 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-static inline u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- return ring->outstanding_lazy_request = dev_priv->next_seqno;
-}
+u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ }
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno);
+ uint32_t seqno,
+ bool do_retire);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1227,6 +1281,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
/* i915_gem_gtt.c */
+int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj);
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
@@ -1365,7 +1427,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e55badb..1f441f5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
+static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -258,73 +259,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
-static inline void
-slow_shmem_copy(struct page *dst_page,
- int dst_offset,
- struct page *src_page,
- int src_offset,
- int length)
-{
- char *dst_vaddr, *src_vaddr;
-
- dst_vaddr = kmap(dst_page);
- src_vaddr = kmap(src_page);
-
- memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
-
- kunmap(src_page);
- kunmap(dst_page);
-}
-
-static inline void
-slow_shmem_bit17_copy(struct page *gpu_page,
- int gpu_offset,
- struct page *cpu_page,
- int cpu_offset,
- int length,
- int is_read)
-{
- char *gpu_vaddr, *cpu_vaddr;
-
- /* Use the unswizzled path if this page isn't affected. */
- if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
- if (is_read)
- return slow_shmem_copy(cpu_page, cpu_offset,
- gpu_page, gpu_offset, length);
- else
- return slow_shmem_copy(gpu_page, gpu_offset,
- cpu_page, cpu_offset, length);
- }
-
- gpu_vaddr = kmap(gpu_page);
- cpu_vaddr = kmap(cpu_page);
-
- /* Copy the data, XORing A6 with A17 (1). The user already knows he's
- * XORing with the other bits (A9 for Y, A9 and A10 for X)
- */
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- if (is_read) {
- memcpy(cpu_vaddr + cpu_offset,
- gpu_vaddr + swizzled_gpu_offset,
- this_length);
- } else {
- memcpy(gpu_vaddr + swizzled_gpu_offset,
- cpu_vaddr + cpu_offset,
- this_length);
- }
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- kunmap(cpu_page);
- kunmap(gpu_page);
-}
-
/**
* This is the fast shmem pread path, which attempts to copy_from_user directly
* from the backing pages of the object to the user's address space. On a
@@ -385,6 +319,58 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
return 0;
}
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+ const char *gpu_vaddr, int gpu_offset,
+ int length)
+{
+ int ret, cpu_offset = 0;
+
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ ret = __copy_to_user(cpu_vaddr + cpu_offset,
+ gpu_vaddr + swizzled_gpu_offset,
+ this_length);
+ if (ret)
+ return ret + length;
+
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+
+ return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
+ const char *cpu_vaddr,
+ int length)
+{
+ int ret, cpu_offset = 0;
+
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+ cpu_vaddr + cpu_offset,
+ this_length);
+ if (ret)
+ return ret + length;
+
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+
+ return 0;
+}
+
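The two swizzled-copy helpers above replace the old slow_shmem_bit17_copy(): when physical address bit 17 is set, adjacent 64-byte cachelines are swapped, so the copy walks the buffer one (partial) cacheline at a time and XORs the GPU-side offset with 64. A minimal userspace sketch of that offset arithmetic follows; ALIGN64() and copy_from_swizzled() are made-up names and the buffer sizes are purely illustrative:

#include <stdio.h>
#include <string.h>

#define ALIGN64(x) (((x) + 63) & ~63UL)

static void copy_from_swizzled(char *dst, const char *src_swizzled,
			       unsigned long gpu_offset, unsigned long length)
{
	unsigned long cpu_offset = 0;

	while (length > 0) {
		unsigned long cacheline_end = ALIGN64(gpu_offset + 1);
		unsigned long this_length = cacheline_end - gpu_offset;

		if (this_length > length)
			this_length = length;

		/* bits 0..5 are untouched, so XOR with 64 swaps whole cachelines */
		memcpy(dst + cpu_offset,
		       src_swizzled + (gpu_offset ^ 64),
		       this_length);

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}
}

int main(void)
{
	char src[256], dst[128];
	unsigned long i;

	/* build a bit-17-style swizzled source: adjacent 64-byte cachelines swapped */
	for (i = 0; i < sizeof(src); i++)
		src[i ^ 64] = (char)i;

	copy_from_swizzled(dst, src, 32, sizeof(dst));

	printf("%u %u\n", (unsigned char)dst[0], (unsigned char)dst[127]); /* 32 159 */
	return 0;
}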
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -398,72 +384,34 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- struct mm_struct *mm = current->mm;
- struct page **user_pages;
+ char __user *user_data;
ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_offset;
- int data_page_index, data_page_offset;
- int page_length;
- int ret;
- uint64_t data_ptr = args->data_ptr;
- int do_bit17_swizzling;
+ loff_t offset;
+ int shmem_page_offset, page_length, ret;
+ int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, yet we want to hold it while
- * dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
+ offset = args->offset;
mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 1, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj,
- args->offset,
- args->size);
- if (ret)
- goto out;
-
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- offset = args->offset;
while (remain > 0) {
struct page *page;
+ char *vaddr;
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
-
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
@@ -471,36 +419,38 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
goto out;
}
- if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(page,
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 1);
- } else {
- slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- page,
- shmem_page_offset,
- page_length);
- }
+ page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+ (page_to_phys(page) & (1 << 17)) != 0;
+
+ vaddr = kmap(page);
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
mark_page_accessed(page);
page_cache_release(page);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
remain -= page_length;
- data_ptr += page_length;
+ user_data += page_length;
offset += page_length;
}
out:
- for (i = 0; i < pinned_pages; i++) {
- SetPageDirty(user_pages[i]);
- mark_page_accessed(user_pages[i]);
- page_cache_release(user_pages[i]);
- }
- drm_free_large(user_pages);
+ mutex_lock(&dev->struct_mutex);
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
return ret;
}
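Both rewritten slow paths now follow the same locking shape: drop dev->struct_mutex before touching user memory (which may fault and therefore sleep), re-take it afterwards, and re-validate object state, here by truncating backing pages that were re-instantiated while the object was marked purged. A standalone illustration of that unlock/copy/relock/revalidate pattern, with a pthread mutex standing in for struct_mutex and every name made up:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int madv_purged;			/* state that may change while unlocked */

static int copy_out(char *user_buf, const char *src, size_t len)
{
	int ret;

	pthread_mutex_unlock(&struct_mutex);	/* may fault/sleep: no lock held */
	memcpy(user_buf, src, len);		/* stands in for __copy_to_user() */
	ret = 0;
	pthread_mutex_lock(&struct_mutex);	/* re-take before touching object state */

	if (madv_purged) {
		/* revalidate: e.g. drop pages reinstated behind our back */
	}
	return ret;
}

int main(void)
{
	char dst[8];

	pthread_mutex_lock(&struct_mutex);
	copy_out(dst, "example", sizeof(dst));
	pthread_mutex_unlock(&struct_mutex);
	printf("%s\n", dst);
	return 0;
}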
@@ -841,71 +791,36 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- struct mm_struct *mm = current->mm;
- struct page **user_pages;
ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_offset;
- int data_page_index, data_page_offset;
- int page_length;
- int ret;
- uint64_t data_ptr = args->data_ptr;
- int do_bit17_swizzling;
+ loff_t offset;
+ char __user *user_data;
+ int shmem_page_offset, page_length, ret;
+ int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
-
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
-
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
offset = args->offset;
obj->dirty = 1;
+ mutex_unlock(&dev->struct_mutex);
+
while (remain > 0) {
struct page *page;
+ char *vaddr;
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
@@ -913,34 +828,45 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
goto out;
}
- if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(page,
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 0);
- } else {
- slow_shmem_copy(page,
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
- }
+ page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+ (page_to_phys(page) & (1 << 17)) != 0;
+
+ vaddr = kmap(page);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+ user_data,
+ page_length);
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ kunmap(page);
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
remain -= page_length;
- data_ptr += page_length;
+ user_data += page_length;
offset += page_length;
}
out:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ mutex_lock(&dev->struct_mutex);
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ }
return ret;
}
@@ -996,10 +922,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_obj)
+ if (obj->phys_obj) {
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj->gtt_space &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ goto out;
+ }
+
+ if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
@@ -1018,18 +947,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
- } else {
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ if (ret != -EFAULT)
+ goto out;
+ /* Fall through to the shmfs paths because the gtt paths might
+ * fail with non-page-backed user pointers (e.g. gtt mappings
+ * when moving data between textures). */
}
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ goto out;
+
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -1141,7 +1076,6 @@ int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
unsigned long addr;
@@ -1153,11 +1087,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
- if (obj->size > dev_priv->mm.gtt_mappable_end) {
- drm_gem_object_unreference_unlocked(obj);
- return -E2BIG;
- }
-
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1647,6 +1576,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
}
}
+static u32
+i915_gem_get_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno = dev_priv->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++dev_priv->next_seqno == 0)
+ dev_priv->next_seqno = 1;
+
+ return seqno;
+}
+
+u32
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request == 0)
+ ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+
+ return ring->outstanding_lazy_request;
+}
+
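i915_gem_next_request_seqno() is now a real (lazy) allocator: the first caller for a pending request grabs a fresh seqno from i915_gem_get_seqno() and caches it in ring->outstanding_lazy_request, later callers see the same value, i915_add_request() resets the cache to 0, and 0 itself is skipped on wrap so it can keep meaning "no seqno". A tiny standalone model of that scheme follows; the function and variable names below are illustrative, not the driver's:

#include <stdio.h>
#include <stdint.h>

static uint32_t next_seqno = 1;
static uint32_t outstanding_lazy_request;	/* 0 means "none allocated yet" */

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	if (++next_seqno == 0)		/* reserve 0 for "no seqno" */
		next_seqno = 1;
	return seqno;
}

static uint32_t next_request_seqno(void)
{
	if (outstanding_lazy_request == 0)
		outstanding_lazy_request = get_seqno();
	return outstanding_lazy_request;
}

static void add_request(void)
{
	/* ... emit the request using next_request_seqno() ... */
	outstanding_lazy_request = 0;	/* next caller allocates a fresh seqno */
}

int main(void)
{
	printf("%u %u\n", next_request_seqno(), next_request_seqno());	/* same value twice */
	add_request();
	printf("%u\n", next_request_seqno());				/* a new value */
	return 0;
}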
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
@@ -1654,10 +1605,19 @@ i915_add_request(struct intel_ring_buffer *ring,
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
+ u32 request_ring_position;
int was_empty;
int ret;
BUG_ON(request == NULL);
+ seqno = i915_gem_next_request_seqno(ring);
+
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
if (ret)
@@ -1667,6 +1627,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = seqno;
request->ring = ring;
+ request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
@@ -1681,7 +1642,7 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
- ring->outstanding_lazy_request = false;
+ ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
@@ -1803,7 +1764,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
-static void
+void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
@@ -1831,6 +1792,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
break;
trace_i915_gem_request_retire(ring, request->seqno);
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of the tail of the request to update the last known position
+ * of the GPU head.
+ */
+ ring->last_retired_head = request->tail;
list_del(&request->list);
i915_gem_request_remove_from_client(request);
@@ -1943,7 +1910,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno)
+ uint32_t seqno,
+ bool do_retire)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
@@ -2017,17 +1985,12 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(ring),
- dev_priv->next_seqno);
-
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
*/
- if (ret == 0)
+ if (ret == 0 && do_retire)
i915_gem_retire_requests_ring(ring);
return ret;
@@ -2051,7 +2014,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+ true);
if (ret)
return ret;
}
@@ -2089,6 +2053,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
if (obj->gtt_space == NULL)
@@ -2133,6 +2098,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
+ if (obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
+
i915_gem_object_put_pages_gtt(obj);
list_del_init(&obj->gtt_list);
@@ -2172,7 +2142,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
{
int ret;
@@ -2186,18 +2156,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+ do_retire);
}
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i]);
+ ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
if (ret)
return ret;
}
@@ -2400,7 +2370,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno);
+ obj->last_fenced_seqno,
+ true);
if (ret)
return ret;
}
@@ -2432,6 +2403,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
i915_gem_clear_fence_reg(obj->base.dev,
&dev_priv->fence_regs[obj->fence_reg]);
@@ -2456,7 +2429,7 @@ i915_find_fence_reg(struct drm_device *dev,
if (!reg->obj)
return reg;
- if (!reg->obj->pin_count)
+ if (!reg->pin_count)
avail = reg;
}
@@ -2466,7 +2439,7 @@ i915_find_fence_reg(struct drm_device *dev,
/* None available, try to steal one or wait for a user to finish */
avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
- if (reg->obj->pin_count)
+ if (reg->pin_count)
continue;
if (first == NULL)
@@ -2541,7 +2514,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno);
+ reg->setup_seqno,
+ true);
if (ret)
return ret;
}
@@ -2560,7 +2534,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg = i915_find_fence_reg(dev, pipelined);
if (reg == NULL)
- return -ENOSPC;
+ return -EDEADLK;
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
@@ -2660,6 +2634,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
list_del_init(&reg->lru_list);
reg->obj = NULL;
reg->setup_seqno = 0;
+ reg->pin_count = 0;
}
/**
@@ -2946,6 +2921,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (obj->cache_level == cache_level)
@@ -2974,6 +2951,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_aliasing_ppgtt_mapping)
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@@ -3084,10 +3064,13 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
return ret;
}
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+
/* Ensure that we invalidate the GPU's caches and TLBs. */
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-
- return i915_gem_object_wait_rendering(obj);
+ return 0;
}
/**
@@ -3619,8 +3602,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- /* On Gen6, we can have the GPU use the LLC (the CPU
+ if (HAS_LLC(dev)) {
+ /* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
* display scanout are coherent with the CPU in
@@ -3710,7 +3693,7 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -3745,12 +3728,71 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 5 ||
+ dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN5(dev))
+ return;
+
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+ if (IS_GEN6(dev))
+ I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else
+ I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+}
+
+void i915_gem_init_ppgtt(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return;
+
+ pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ring = &dev_priv->ring[i];
+
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ }
+}
+
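i915_gem_init_ppgtt() loads each ring's PP_DIR_BASE with the page-directory location expressed in GTT cachelines (PDEs are 4 bytes, so 16 per 64-byte cacheline) shifted into bits 31:16. A worked example of that encoding, assuming the 512-entry page directory that the aliasing-PPGTT setup in i915_gem_gtt.c below steals from the top of the 512*1024-entry global GTT (the printed value only holds under those assumptions):

#include <stdio.h>
#include <stdint.h>

#define I915_PPGTT_PD_ENTRIES 512	/* assumed gen6 value, matching the 2G DCLV setting */

int main(void)
{
	uint32_t first_pd_entry = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
	uint32_t pd_offset = first_pd_entry * sizeof(uint32_t);	/* byte offset of the PDEs */

	pd_offset /= 64;	/* in cachelines */
	pd_offset <<= 16;	/* PP_DIR_BASE wants it in bits 31:16 */

	printf("pd_offset register value: 0x%08x\n", pd_offset);	/* 0x7fe00000 */
	return 0;
}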
int
-i915_gem_init_ringbuffer(struct drm_device *dev)
+i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ i915_gem_init_swizzling(dev);
+
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -3769,6 +3811,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
dev_priv->next_seqno = 1;
+ i915_gem_init_ppgtt(dev);
+
return 0;
cleanup_bsd_ring:
@@ -3806,7 +3850,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
+ ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4201,7 +4245,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev) == 0)
+ if (i915_gpu_idle(dev, true) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ead5d00..21a8271 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,7 +36,6 @@ static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj->exec_list, unwind);
- drm_gem_object_reference(&obj->base);
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -49,21 +48,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
struct drm_i915_gem_object *obj;
int ret = 0;
- i915_gem_retire_requests(dev);
-
- /* Re-check for free space after retiring requests */
- if (mappable) {
- if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- min_size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
- 0))
- return 0;
- } else {
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
- }
-
trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
@@ -139,7 +123,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
BUG_ON(ret);
list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -158,10 +141,10 @@ found:
exec_list);
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
+ drm_gem_object_reference(&obj->base);
continue;
}
list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
@@ -195,7 +178,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 65e1f00..81687af 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= obj->ring->id;
+ cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= ring->id;
+ cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
@@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
- DRM_ERROR("No GTT space found for object %d\n",
+ DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_ERROR("reloc with multiple write domains: "
+ DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
- DRM_ERROR("reloc with read/write CPU domains: "
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@@ -315,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_ERROR("Write domain conflict: "
+ DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
@@ -336,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
- DRM_ERROR("Relocation beyond object bounds: "
+ DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
@@ -344,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
if (unlikely(reloc->offset & 3)) {
- DRM_ERROR("Relocation not 4-byte aligned: "
+ DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
@@ -461,11 +462,60 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
+#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+static int
+pin_and_fence_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool need_fence, need_mappable;
+ int ret;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ if (ret)
+ return ret;
+
+ if (has_fenced_gpu_access) {
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ if (obj->tiling_mode) {
+ ret = i915_gem_object_get_fence(obj, ring);
+ if (ret)
+ goto err_unpin;
+
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ i915_gem_object_pin_fence(obj);
+ } else {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto err_unpin;
+ }
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
+}
+
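pin_and_fence_object() records that it actually pinned a fence by setting bit 31 of the exec entry's flags (__EXEC_OBJECT_HAS_FENCE); the unwind paths later in this hunk test and clear that bit before dropping the fence pin. Only the low flag bits belong to the ioctl ABI, which leaves the top bit free for this internal bookkeeping. A small standalone sketch of the flag handling; struct exec_entry, reserve() and unreserve() are made-up stand-ins, and EXEC_OBJECT_NEEDS_FENCE is shown with its uapi value of (1<<0):

#include <stdio.h>
#include <stdint.h>

#define EXEC_OBJECT_NEEDS_FENCE  (1u << 0)	/* userspace-visible request */
#define __EXEC_OBJECT_HAS_FENCE  (1u << 31)	/* internal: we pinned a fence */

struct exec_entry {
	uint32_t flags;
};

static void reserve(struct exec_entry *e, int got_fence)
{
	if (got_fence)
		e->flags |= __EXEC_OBJECT_HAS_FENCE;
}

static void unreserve(struct exec_entry *e)
{
	if (e->flags & __EXEC_OBJECT_HAS_FENCE) {
		/* i915_gem_object_unpin_fence() would go here */
		e->flags &= ~__EXEC_OBJECT_HAS_FENCE;
	}
}

int main(void)
{
	struct exec_entry e = { .flags = EXEC_OBJECT_NEEDS_FENCE };

	reserve(&e, 1);
	printf("flags after reserve:   %#x\n", (unsigned)e.flags);
	unreserve(&e);
	printf("flags after unreserve: %#x\n", (unsigned)e.flags);	/* back to 0x1 */
	return 0;
}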
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -518,6 +568,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
+
if (!obj->gtt_space)
continue;
@@ -532,58 +583,55 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = i915_gem_object_pin(obj,
- entry->alignment,
- need_mappable);
+ ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
-
- entry++;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- bool need_fence;
+ if (obj->gtt_space)
+ continue;
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
+ ret = pin_and_fence_object(obj, ring);
+ if (ret) {
+ int ret_ignore;
+
+ /* This can potentially raise a harmless
+ * -EINVAL if we failed to bind in the above
+ * call. It cannot raise -EINTR since we know
+ * that the bo is freshly bound and so will
+ * not need to be flushed or waited upon.
+ */
+ ret_ignore = i915_gem_object_unbind(obj);
+ (void)ret_ignore;
+ WARN_ON(obj->gtt_space);
+ break;
+ }
+ }
- if (!obj->gtt_space) {
- bool need_mappable =
- entry->relocation_count ? true : need_fence;
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
- ret = i915_gem_object_pin(obj,
- entry->alignment,
- need_mappable);
- if (ret)
- break;
- }
+ if (!obj->gtt_space)
+ continue;
- if (has_fenced_gpu_access) {
- if (need_fence) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- break;
- } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode == I915_TILING_NONE) {
- /* XXX pipelined! */
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- break;
- }
- obj->pending_fenced_gpu_access = need_fence;
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
- entry->offset = obj->gtt_offset;
- }
+ i915_gem_object_unpin(obj);
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
- i915_gem_object_unpin(obj);
+ /* ... and ensure ppgtt mapping exist if needed. */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
}
if (ret != -ENOSPC || retry > 1)
@@ -600,16 +648,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
} while (1);
err:
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
- while (objects != &obj->exec_list) {
- if (obj->gtt_space)
- i915_gem_object_unpin(obj);
+ list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ continue;
+
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
+ i915_gem_object_unpin(obj);
}
return ret;
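The reworked error path above switches from walking the list backwards by hand to list_for_each_entry_continue_reverse(), so only the objects reserved before the failure point are unpinned (and have their fence pin dropped). The same partial-unwind-on-failure shape in a self-contained sketch, using an array instead of a list and made-up reserve()/unreserve() helpers:

#include <stdio.h>

#define N 5

static int reserve(int i)
{
	return i == 3 ? -1 : 0;		/* pretend the 4th object fails */
}

static void unreserve(int i)
{
	printf("unreserve %d\n", i);
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < N; i++) {
		ret = reserve(i);
		if (ret)
			break;
	}

	if (ret) {
		/* unwind only what was successfully reserved, in reverse */
		while (--i >= 0)
			unreserve(i);
	}
	return ret ? 1 : 0;
}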
@@ -682,7 +733,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
@@ -1013,7 +1064,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
- DRM_ERROR("execbuf with invalid offset/length\n");
+ DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
@@ -1028,20 +1079,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
+ DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
@@ -1067,18 +1118,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
break;
default:
- DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
- DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
@@ -1123,7 +1174,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
@@ -1131,7 +1182,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (!list_empty(&obj->exec_list)) {
- DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
@@ -1169,7 +1220,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
@@ -1186,7 +1237,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
@@ -1274,7 +1325,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i;
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@@ -1282,7 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@@ -1293,7 +1344,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
+ DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@@ -1334,7 +1385,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
+ DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
@@ -1354,7 +1405,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
int ret;
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@@ -1364,7 +1415,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
@@ -1373,7 +1424,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
+ DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
@@ -1388,7 +1439,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
+ DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6042c5e..2eacd78 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -29,6 +29,279 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry,
+ unsigned num_entries)
+{
+ uint32_t *pt_vaddr;
+ uint32_t scratch_pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
+
+ scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+ scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ if (last_pte > I915_PPGTT_PT_ENTRIES)
+ last_pte = I915_PPGTT_PT_ENTRIES;
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (i = first_pte; i < last_pte; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
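i915_ppgtt_clear_range(), like the insert helpers that follow it, addresses the two-level PPGTT by splitting a linear PTE index into a page-directory slot and an offset inside that page table: act_pd = entry / I915_PPGTT_PT_ENTRIES and first_pte = entry % I915_PPGTT_PT_ENTRIES, with first_pte rolling back to 0 each time the loop crosses into the next table. A standalone sketch of that walk, assuming the gen6 value of 1024 PTEs per 4 KiB page table (PT_ENTRIES and walk() are illustrative names):

#include <stdio.h>

#define PT_ENTRIES 1024		/* assumed: 4 KiB page table / 4-byte PTEs */

/* Walk num_entries PTEs starting at first_entry, printing the
 * (page-directory slot, pte range) pairs that would be touched. */
static void walk(unsigned first_entry, unsigned num_entries)
{
	unsigned act_pd = first_entry / PT_ENTRIES;
	unsigned first_pte = first_entry % PT_ENTRIES;

	while (num_entries) {
		unsigned last_pte = first_pte + num_entries;

		if (last_pte > PT_ENTRIES)
			last_pte = PT_ENTRIES;

		printf("pd %u: ptes [%u, %u)\n", act_pd, first_pte, last_pte);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

int main(void)
{
	/* a 6 MiB object bound at GTT offset 4 MiB: 1536 pages starting at entry 1024 */
	walk(4 * 1024 * 1024 / 4096, 6 * 1024 * 1024 / 4096);
	return 0;
}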
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt;
+ uint32_t pd_entry;
+ unsigned first_pd_entry_in_global_pt;
+ uint32_t __iomem *pd_addr;
+ int i;
+ int ret = -ENOMEM;
+
+ /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+ * entries. For aliasing ppgtt support we just steal them at the end for
+ * now. */
+ first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ret;
+
+ ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_pages)
+ goto err_ppgtt;
+
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
+ if (!ppgtt->pt_pages[i])
+ goto err_pt_alloc;
+ }
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
+ *ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_dma_addr)
+ goto err_pt_alloc;
+ }
+
+ pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+ if (dev_priv->mm.gtt->needs_dmar) {
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+ 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev->pdev,
+ pt_addr)) {
+ ret = -EIO;
+ goto err_pd_pin;
+
+ }
+ ppgtt->pt_dma_addr[i] = pt_addr;
+ } else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+
+ i915_ppgtt_clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+ return 0;
+
+err_pd_pin:
+ if (ppgtt->pt_dma_addr) {
+ for (i--; i >= 0; i--)
+ pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+err_pt_alloc:
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ if (ppgtt->pt_pages[i])
+ __free_page(ppgtt->pt_pages[i]);
+ }
+ kfree(ppgtt->pt_pages);
+err_ppgtt:
+ kfree(ppgtt);
+
+ return ret;
+}
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int i;
+
+ if (!ppgtt)
+ return;
+
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ __free_page(ppgtt->pt_pages[i]);
+ kfree(ppgtt->pt_pages);
+ kfree(ppgtt);
+}
+
+static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
+ struct scatterlist *sg_list,
+ unsigned sg_len,
+ unsigned first_entry,
+ uint32_t pte_flags)
+{
+ uint32_t *pt_vaddr, pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned i, j, m, segment_len;
+ dma_addr_t page_addr;
+ struct scatterlist *sg;
+
+ /* init sg walking */
+ sg = sg_list;
+ i = 0;
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+
+ while (i < sg_len) {
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+ page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+ pt_vaddr[j] = pte | pte_flags;
+
+ /* grab the next page */
+ m++;
+ if (m == segment_len) {
+ sg = sg_next(sg);
+ i++;
+ if (i == sg_len)
+ break;
+
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+ }
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry, unsigned num_entries,
+ struct page **pages, uint32_t pte_flags)
+{
+ uint32_t *pt_vaddr, pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
+ dma_addr_t page_addr;
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (i = first_pte; i < last_pte; i++) {
+ page_addr = page_to_phys(*pages);
+ pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+ pt_vaddr[i] = pte | pte_flags;
+
+ pages++;
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t pte_flags = GEN6_PTE_VALID;
+
+ switch (cache_level) {
+ case I915_CACHE_LLC_MLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte_flags |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else
+ i915_ppgtt_insert_pages(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ pte_flags);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj)
+{
+ i915_ppgtt_clear_range(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
+
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
@@ -55,7 +328,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev)) {
+ if (i915_gpu_idle(dev_priv->dev, false)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 31d334d..1a93066 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -93,8 +93,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 6) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized dimms. We don't need to check the 3rd
+ * channel because no cpu with gpu attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
@@ -107,10 +122,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev)) {
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
- /* On mobile 9xx chipsets, channel interleave by the CPU is
+ /* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5bd4361..afd4e03 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -720,7 +720,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
- void __iomem *s;
void *d;
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
@@ -728,10 +727,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else {
+ void *s;
+
+ drm_clflush_pages(&src->pages[page], 1);
+
+ s = kmap_atomic(src->pages[page]);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&src->pages[page], 1);
+ }
local_irq_restore(flags);
dst->pages[page] = d;
@@ -770,11 +788,11 @@ i915_error_state_free(struct drm_device *dev,
{
int i;
- for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
- i915_error_object_free(error->batchbuffer[i]);
-
- for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
- i915_error_object_free(error->ringbuffer[i]);
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ kfree(error->ring[i].requests);
+ }
kfree(error->active_bo);
kfree(error->overlay);
@@ -804,7 +822,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : 0;
+ err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
if (++i == count)
@@ -876,6 +894,92 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
}
+static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS) {
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->bbaddr = I915_READ64(BB_ADDR);
+ }
+ } else {
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+ if (ring->obj == NULL)
+ continue;
+
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -900,7 +1004,7 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
/* Account for pipe specific data like PIPE*STAT */
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
@@ -909,59 +1013,18 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
- error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
- error->instpm = I915_READ(INSTPM);
- error->error = 0;
+
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
-
- error->bcs_acthd = I915_READ(BCS_ACTHD);
- error->bcs_ipehr = I915_READ(BCS_IPEHR);
- error->bcs_ipeir = I915_READ(BCS_IPEIR);
- error->bcs_instdone = I915_READ(BCS_INSTDONE);
- error->bcs_seqno = 0;
- if (dev_priv->ring[BCS].get_seqno)
- error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
-
- error->vcs_acthd = I915_READ(VCS_ACTHD);
- error->vcs_ipehr = I915_READ(VCS_IPEHR);
- error->vcs_ipeir = I915_READ(VCS_IPEIR);
- error->vcs_instdone = I915_READ(VCS_INSTDONE);
- error->vcs_seqno = 0;
- if (dev_priv->ring[VCS].get_seqno)
- error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ error->done_reg = I915_READ(DONE_REG);
}
- if (INTEL_INFO(dev)->gen >= 4) {
- error->ipeir = I915_READ(IPEIR_I965);
- error->ipehr = I915_READ(IPEHR_I965);
- error->instdone = I915_READ(INSTDONE_I965);
- error->instps = I915_READ(INSTPS);
- error->instdone1 = I915_READ(INSTDONE1);
- error->acthd = I915_READ(ACTHD_I965);
- error->bbaddr = I915_READ64(BB_ADDR);
- } else {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
- }
- i915_gem_record_fences(dev, error);
-
- /* Record the active batch and ring buffers */
- for (i = 0; i < I915_NUM_RINGS; i++) {
- error->batchbuffer[i] =
- i915_error_first_batchbuffer(dev_priv,
- &dev_priv->ring[i]);
- error->ringbuffer[i] =
- i915_error_object_create(dev_priv,
- dev_priv->ring[i].obj);
- }
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@@ -1017,11 +1080,12 @@ void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
+ unsigned long flags;
- spin_lock(&dev_priv->error_lock);
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
- spin_unlock(&dev_priv->error_lock);
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
i915_error_state_free(dev, error);
@@ -1698,6 +1762,7 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
if (!IS_GEN2(dev)) {
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -1705,7 +1770,6 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
-
if (kick_ring(&dev_priv->ring[RCS]))
goto repeat;
@@ -1718,7 +1782,6 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
- i915_handle_error(dev, true);
return;
}
} else {
@@ -1752,18 +1815,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev)) {
- /* Workaround stalls observed on Sandy Bridge GPUs by
- * making the blitter command streamer generate a
- * write to the Hardware Status Page for
- * MI_USER_INTERRUPT. This appears to serialize the
- * previous seqno write out before the interrupt
- * happens.
- */
- I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
- I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
- }
-
/* XXX hotplug from PCH */
I915_WRITE(DEIMR, 0xffffffff);
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
deleted file mode 100644
index cc8f6d4..0000000
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/* This memory manager is integrated into the global/local lru
- * mechanisms used by the clients. Specifically, it operates by
- * setting the 'in_use' fields of the global LRU to indicate whether
- * this region is privately allocated to a client.
- *
- * This does require the client to actually respect that field.
- *
- * Currently no effort is made to allocate 'private' memory in any
- * clever way - the LRU information isn't used to determine which
- * block to allocate, and the ring is drained prior to allocations --
- * in other words allocation is expensive.
- */
-static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
- struct drm_tex_region *list;
- unsigned shift, nr;
- unsigned start;
- unsigned end;
- unsigned i;
- int age;
-
- shift = dev_priv->tex_lru_log_granularity;
- nr = I915_NR_TEX_REGIONS;
-
- start = p->start >> shift;
- end = (p->start + p->size - 1) >> shift;
-
- age = ++sarea_priv->texAge;
- list = sarea_priv->texList;
-
- /* Mark the regions with the new flag and update their age. Move
- * them to head of list to preserve LRU semantics.
- */
- for (i = start; i <= end; i++) {
- list[i].in_use = in_use;
- list[i].age = age;
-
- /* remove_from_list(i)
- */
- list[(unsigned)list[i].next].prev = list[i].prev;
- list[(unsigned)list[i].prev].next = list[i].next;
-
- /* insert_at_head(list, i)
- */
- list[i].prev = nr;
- list[i].next = list[nr].next;
- list[(unsigned)list[nr].next].prev = i;
- list[nr].next = i;
- }
-}
-
-/* Very simple allocator for agp memory, working on a static range
- * already mapped into each client's address space.
- */
-
-static struct mem_block *split_block(struct mem_block *p, int start, int size,
- struct drm_file *file_priv)
-{
- /* Maybe cut off the start of an existing block */
- if (start > p->start) {
- struct mem_block *newblock = kmalloc(sizeof(*newblock),
- GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start;
- newblock->size = p->size - (start - p->start);
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size -= newblock->size;
- p = newblock;
- }
-
- /* Maybe cut off the end of an existing block */
- if (size < p->size) {
- struct mem_block *newblock = kmalloc(sizeof(*newblock),
- GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start + size;
- newblock->size = p->size - size;
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size = size;
- }
-
- out:
- /* Our block is in the middle */
- p->file_priv = file_priv;
- return p;
-}
-
-static struct mem_block *alloc_block(struct mem_block *heap, int size,
- int align2, struct drm_file *file_priv)
-{
- struct mem_block *p;
- int mask = (1 << align2) - 1;
-
- for (p = heap->next; p != heap; p = p->next) {
- int start = (p->start + mask) & ~mask;
- if (p->file_priv == NULL && start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
-
- return NULL;
-}
-
-static struct mem_block *find_block(struct mem_block *heap, int start)
-{
- struct mem_block *p;
-
- for (p = heap->next; p != heap; p = p->next)
- if (p->start == start)
- return p;
-
- return NULL;
-}
-
-static void free_block(struct mem_block *p)
-{
- p->file_priv = NULL;
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- if (p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
-
- if (p->prev->file_priv == NULL) {
- struct mem_block *q = p->prev;
- q->size += p->size;
- q->next = p->next;
- q->next->prev = q;
- kfree(p);
- }
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-static int init_heap(struct mem_block **heap, int start, int size)
-{
- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
- if (!blocks)
- return -ENOMEM;
-
- *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
- if (!*heap) {
- kfree(blocks);
- return -ENOMEM;
- }
-
- blocks->start = start;
- blocks->size = size;
- blocks->file_priv = NULL;
- blocks->next = blocks->prev = *heap;
-
- memset(*heap, 0, sizeof(**heap));
- (*heap)->file_priv = (struct drm_file *) -1;
- (*heap)->next = (*heap)->prev = blocks;
- return 0;
-}
-
-/* Free all blocks associated with the releasing file.
- */
-void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
- struct mem_block *heap)
-{
- struct mem_block *p;
-
- if (!heap || !heap->next)
- return;
-
- for (p = heap->next; p != heap; p = p->next) {
- if (p->file_priv == file_priv) {
- p->file_priv = NULL;
- mark_block(dev, p, 0);
- }
- }
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- for (p = heap->next; p != heap; p = p->next) {
- while (p->file_priv == NULL && p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
- }
-}
-
-/* Shutdown.
- */
-void i915_mem_takedown(struct mem_block **heap)
-{
- struct mem_block *p;
-
- if (!*heap)
- return;
-
- for (p = (*heap)->next; p != *heap;) {
- struct mem_block *q = p;
- p = p->next;
- kfree(q);
- }
-
- kfree(*heap);
- *heap = NULL;
-}
-
-static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
-{
- switch (region) {
- case I915_MEM_REGION_AGP:
- return &dev_priv->agp_heap;
- default:
- return NULL;
- }
-}
-
-/* IOCTL HANDLERS */
-
-int i915_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_alloc_t *alloc = data;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, alloc->region);
- if (!heap || !*heap)
- return -EFAULT;
-
- /* Make things easier on ourselves: all allocations at least
- * 4k aligned.
- */
- if (alloc->alignment < 12)
- alloc->alignment = 12;
-
- block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
-
- if (!block)
- return -ENOMEM;
-
- mark_block(dev, block, 1);
-
- if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
- sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-int i915_mem_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_free_t *memfree = data;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, memfree->region);
- if (!heap || !*heap)
- return -EFAULT;
-
- block = find_block(*heap, memfree->region_offset);
- if (!block)
- return -EFAULT;
-
- if (block->file_priv != file_priv)
- return -EPERM;
-
- mark_block(dev, block, 0);
- free_block(block);
- return 0;
-}
-
-int i915_mem_init_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_init_heap_t *initheap = data;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, initheap->region);
- if (!heap)
- return -EFAULT;
-
- if (*heap) {
- DRM_ERROR("heap already initialized?");
- return -EFAULT;
- }
-
- return init_heap(heap, initheap->start, initheap->size);
-}
-
-int i915_mem_destroy_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_destroy_heap_t *destroyheap = data;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, destroyheap->region);
- if (!heap) {
- DRM_ERROR("get_heap failed");
- return -EFAULT;
- }
-
- if (!*heap) {
- DRM_ERROR("heap not initialized?");
- return -EFAULT;
- }
-
- i915_mem_takedown(heap);
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 558ac71..3886cf0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,12 +86,45 @@
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define GEN6_MBCTL 0x0907c
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
+
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
+/* gen6+ has bits 11-4 for physical addr bits 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_CACHE_BITS (3 << 1)
+#define GEN6_PTE_GFDT (1 << 3)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
+#define PP_DIR_DCLV_2G 0xffffffff
+
+#define GAM_ECOCHK 0x4090
+#define ECOCHK_SNB_BIT (1<<10)
+#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
+#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
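The GEN6_GTT_ADDR_ENCODE() macro added above folds bits 39:32 of a physical address into bits 11:4 of a 32-bit GTT entry (the low 12 bits of a page-aligned address are free). A small user-space sketch of the arithmetic, purely illustrative and not part of the patch; the example address is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors GEN6_GTT_ADDR_ENCODE(addr): addr bits 39:32 land in entry
     * bits 11:4 of the (page-aligned) entry. */
    static uint32_t gen6_gtt_addr_encode(uint64_t addr)
    {
            return (uint32_t)(addr | ((addr >> 28) & 0xff0));
    }

    int main(void)
    {
            uint64_t phys = 0xab12345000ULL;          /* page-aligned, above 4GiB */
            uint32_t pte = gen6_gtt_addr_encode(phys) | 0x1;  /* GEN6_PTE_VALID */

            printf("pte = 0x%08x\n", pte);            /* prints 0x12345ab1 */
            return 0;
    }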
@@ -295,6 +328,12 @@
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+/* control register for cpu gtt access */
+#define TILECTL 0x101000
+#define TILECTL_SWZCTL (1 << 0)
+#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
+#define TILECTL_BACKSNOOP_DIS (1 << 3)
+
/*
* Instruction and interrupt control regs
*/
@@ -318,7 +357,14 @@
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define ARB_MODE 0x04030
+#define ARB_MODE_SWIZZLE_SNB (1<<4)
+#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
+#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
+#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define RING_ACTHD(base) ((base)+0x74)
@@ -352,6 +398,12 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define RING_IPEIR(base) ((base)+0x64)
+#define RING_IPEHR(base) ((base)+0x68)
+#define RING_INSTDONE(base) ((base)+0x6c)
+#define RING_INSTPS(base) ((base)+0x70)
+#define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_INSTPM(base) ((base)+0xc0)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
@@ -365,14 +417,6 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
-#define VCS_INSTDONE 0x1206C
-#define VCS_IPEIR 0x12064
-#define VCS_IPEHR 0x12068
-#define VCS_ACTHD 0x12074
-#define BCS_INSTDONE 0x2206C
-#define BCS_IPEIR 0x22064
-#define BCS_IPEHR 0x22068
-#define BCS_ACTHD 0x22074
#define ERROR_GEN6 0x040a0
@@ -391,10 +435,11 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
-# define MI_FLUSH_ENABLE (1 << 11)
+# define MI_FLUSH_ENABLE (1 << 12)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
+#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -1037,6 +1082,29 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/** snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256 MB. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+
+
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
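The MAD_DIMM fields above encode each channel's DIMM sizes in 256 MB units, with width and rank flags in the upper bits. A hedged decoding sketch; the register value below is an assumed example, not a real dump, and the macros are local copies of the defines above:

    #include <stdint.h>
    #include <stdio.h>

    #define DIMM_A_SIZE(mad)        (((mad) >> 0) & 0xff)    /* 256 MB units */
    #define DIMM_B_SIZE(mad)        (((mad) >> 8) & 0xff)
    #define DIMM_A_X16(mad)         (((mad) >> 19) & 0x1)    /* else x8 chips */

    int main(void)
    {
            uint32_t mad = 0x00080010;      /* assumed example value */

            printf("DIMM A: %u MB (%s)\n", DIMM_A_SIZE(mad) * 256,
                   DIMM_A_X16(mad) ? "x16" : "x8");
            printf("DIMM B: %u MB\n", DIMM_B_SIZE(mad) * 256);
            return 0;
    }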
@@ -1316,6 +1384,7 @@
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
+#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
@@ -1326,6 +1395,8 @@
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
+#define _VSYNCSHIFT_B 0x61028
+
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
@@ -1334,6 +1405,7 @@
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
+#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -2319,10 +2391,21 @@
#define PIPECONF_PALETTE 0
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
-#define PIPECONF_PROGRESSIVE (0 << 21)
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_INTERLACE_MASK (7 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
+/* Ironlake and later have a completely new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
+#define PIPECONF_INTERLACED_ILK (3 << 21)
+#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
+#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
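The interlace field comments above split the encodings into a pre-Ironlake set (field-indication style) and an Ironlake+ set. An illustrative helper showing that split; the constants are local copies of the values defined above and the helper itself is editorial, not kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    #define PROGRESSIVE                     (0u << 21)
    #define INTERLACE_W_FIELD_INDICATION    (6u << 21)      /* gen3/gen4 */
    #define INTERLACED_ILK                  (3u << 21)      /* ilk and later */

    /* Pick the pipeconf interlace bits for a generation, following the
     * comments above. */
    static uint32_t pipeconf_interlace_bits(int gen, bool interlaced)
    {
            if (!interlaced)
                    return PROGRESSIVE;
            return (gen >= 5) ? INTERLACED_ILK : INTERLACE_W_FIELD_INDICATION;
    }

    int main(void)
    {
            return pipeconf_interlace_bits(6, true) == INTERLACED_ILK ? 0 : 1;
    }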
@@ -3219,6 +3302,7 @@
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
+#define _TRANS_VSYNCSHIFT_A 0xe0028
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
@@ -3249,6 +3333,7 @@
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
+#define _TRANS_VSYNCSHIFT_B 0xe1028
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
@@ -3256,6 +3341,8 @@
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
+ _TRANS_VSYNCSHIFT_B)
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
@@ -3289,7 +3376,10 @@
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
+#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_INTERLACED (3<<21)
+#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define TRANS_8BPC (0<<5)
#define TRANS_10BPC (1<<5)
#define TRANS_6BPC (2<<5)
@@ -3628,6 +3718,12 @@
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
+#define GTFIFODBG 0x120000
+#define GT_FIFO_CPU_ERROR_MASK 7
+#define GT_FIFO_OVFERR (1<<2)
+#define GT_FIFO_IAWRERR (1<<1)
+#define GT_FIFO_IARDERR (1<<0)
+
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
@@ -3757,4 +3853,16 @@
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
+#define IBX_AUD_CONFIG_A 0xe2000
+#define CPT_AUD_CONFIG_A 0xe5000
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index cb91210..bae3edf 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -208,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
if (ret < 0) {
- DRM_ERROR("failed to get supported _DSM functions\n");
+ DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 63880e2..8168d8f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -572,7 +572,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
- dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+ dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
@@ -669,7 +669,7 @@ intel_parse_bios(struct drm_device *dev)
}
if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
+ DRM_DEBUG_DRIVER("VBT signature missing\n");
pci_unmap_rom(pdev, bios);
return -1;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dd729d4..4d3d736 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -594,7 +594,10 @@ void intel_crt_init(struct drm_device *dev)
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.crtc_mask = (1 << 0) | (1 << 1);
- connector->interlace_allowed = 1;
+ if (IS_GEN2(dev))
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 397087c..d514719 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -75,7 +75,7 @@ struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
+ int, int, intel_clock_t *, intel_clock_t *);
};
/* FDI */
@@ -83,17 +83,21 @@ struct intel_limit {
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
@@ -515,7 +519,8 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
@@ -562,6 +567,9 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
this_err = abs(clock.dot - target);
if (this_err < err) {
@@ -578,7 +586,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -625,6 +634,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
this_err = abs(clock.dot - target);
if (this_err < err_most) {
@@ -642,7 +654,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
@@ -668,7 +681,8 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
@@ -922,6 +936,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
+ /* if we need the pipe A quirk it must be always on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
@@ -930,19 +948,24 @@ void assert_pipe(struct drm_i915_private *dev_priv,
pipe_name(pipe), state_string(state), state_string(cur_state));
}
-static void assert_plane_enabled(struct drm_i915_private *dev_priv,
- enum plane plane)
+static void assert_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, bool state)
{
int reg;
u32 val;
+ bool cur_state;
reg = DSPCNTR(plane);
val = I915_READ(reg);
- WARN(!(val & DISPLAY_PLANE_ENABLE),
- "plane %c assertion failure, should be active but is disabled\n",
- plane_name(plane));
+ cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+ WARN(cur_state != state,
+ "plane %c assertion failure (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
}
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -951,8 +974,14 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
int cur_pipe;
/* Planes are fixed to pipes on ILK+ */
- if (HAS_PCH_SPLIT(dev_priv->dev))
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ reg = DSPCNTR(pipe);
+ val = I915_READ(reg);
+ WARN((val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be disabled but not\n",
+ plane_name(pipe));
return;
+ }
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
@@ -1071,7 +1100,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
{
u32 val = I915_READ(reg);
WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -1237,7 +1266,8 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val;
+ u32 val, pipeconf_val;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1251,6 +1281,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
+ pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
@@ -1258,8 +1289,19 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
* that in pipeconf reg.
*/
val &= ~PIPE_BPC_MASK;
- val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ val |= pipeconf_val & PIPE_BPC_MASK;
}
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+ if (HAS_PCH_IBX(dev_priv->dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ val |= TRANS_LEGACY_INTERLACED_ILK;
+ else
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -2012,6 +2054,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
+
+ i915_gem_object_pin_fence(obj);
}
dev_priv->mm.interruptible = true;
@@ -2024,6 +2068,12 @@ err_interruptible:
return ret;
}
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin(obj);
+}
+
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
{
@@ -2255,7 +2305,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
@@ -2263,7 +2313,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
- i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
mutex_unlock(&dev->struct_mutex);
@@ -2936,6 +2986,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+ I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
intel_fdi_normal_train(crtc);
@@ -3321,10 +3372,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -3398,11 +3451,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return false;
}
- /* XXX some encoders set the crtcinfo, others don't.
- * Obviously we need some form of conflict resolution here...
- */
- if (adjusted_mode->crtc_htotal == 0)
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* All interlaced capable intel hw wants timings in frames. */
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -4521,6 +4571,7 @@ void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -4529,8 +4580,10 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
@@ -4541,8 +4594,10 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
@@ -4555,8 +4610,10 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEC_IVB,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
@@ -4709,6 +4766,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
int sprite_wm, reg;
int ret;
@@ -4735,7 +4793,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
return;
}
- I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
@@ -4977,6 +5037,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
return display_bpc != bpc;
}
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
+
+ return refclk;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock)
+{
+ /* SDVO TV has fixed PLL values that depend on its clock range;
+ this mirrors the VBIOS setting. */
+ if (adjusted_mode->clock >= 100000
+ && adjusted_mode->clock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+ && adjusted_mode->clock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ }
+}
+
+static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
+ intel_clock_t *clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev)) {
+ fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = (1 << reduced_clock->n) << 16 |
+ reduced_clock->m1 << 8 | reduced_clock->m2;
+ } else {
+ fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
+ reduced_clock->m2;
+ }
+
+ I915_WRITE(FP0(pipe), fp);
+
+ intel_crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ reduced_clock && i915_powersave) {
+ I915_WRITE(FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(FP1(pipe), fp);
+ }
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
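i9xx_update_pll_dividers() above packs the N/M1/M2 divisors into the FP0/FP1 registers as n<<16 | m1<<8 | m2 (Pineview substitutes (1<<n) for n). A quick check of the packing using the fixed low-range SDVO TV divisors from i9xx_adjust_sdvo_tv_clock(); this is an editorial sketch, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
            /* n/m1/m2 from the 100-140.5 MHz SDVO TV clock range above. */
            unsigned int n = 3, m1 = 16, m2 = 8;
            unsigned int fp = n << 16 | m1 << 8 | m2;

            printf("FP0 = 0x%05x\n", fp);   /* prints 0x31008 */
            return 0;
    }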
@@ -4990,7 +5126,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -5031,15 +5167,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (!IS_GEN2(dev)) {
- refclk = 96000;
- } else {
- refclk = 48000;
- }
+ refclk = i9xx_get_refclk(crtc, num_connectors);
/*
* Returns a set of divisors for the desired target clock with the given
@@ -5047,7 +5175,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -5057,53 +5186,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In that case we will disable the LVDS
+ * downclock feature.
+ */
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
+ &clock,
&reduced_clock);
- if (has_reduced_clock && (clock.p != reduced_clock.p)) {
- /*
- * If the different P is found, it means that we can't
- * switch the display clock by using the FP0/FP1.
- * In such case we will disable the LVDS downclock
- * feature.
- */
- DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
- has_reduced_clock = 0;
- }
- }
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (is_sdvo && is_tv) {
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 3;
- clock.m1 = 16;
- clock.m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 6;
- clock.m1 = 12;
- clock.m2 = 8;
- }
}
- if (IS_PINEVIEW(dev)) {
- fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = (1 << reduced_clock.n) << 16 |
- reduced_clock.m1 << 8 | reduced_clock.m2;
- } else {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
- }
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
+ i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
+ &reduced_clock : NULL);
dpll = DPLL_VGA_MODE_DIS;
@@ -5177,8 +5277,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- /* Ironlake's plane is forced to pipe, bit 24 is to
- enable color space conversion */
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -5213,7 +5311,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- I915_WRITE(FP0(pipe), fp);
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
@@ -5300,34 +5397,32 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
- intel_crtc->lowfreq_avail = false;
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(FP1(pipe), fp2);
- intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
- } else {
- I915_WRITE(FP1(pipe), fp);
- if (HAS_PIPE_CXSR(dev)) {
+ } else {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
pipeconf &= ~PIPECONF_INTERLACE_MASK;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (!IS_GEN2(dev) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
- adjusted_mode->crtc_vsync_end -= 1;
- adjusted_mode->crtc_vsync_start -= 1;
- } else
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2;
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ vsyncshift = 0;
+ }
+
+ if (!IS_GEN3(dev))
+ I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5593,7 +5688,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -5603,21 +5699,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In that case we will disable the LVDS
+ * downclock feature.
+ */
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
+ &clock,
&reduced_clock);
- if (has_reduced_clock && (clock.p != reduced_clock.p)) {
- /*
- * If the different P is found, it means that we can't
- * switch the display clock by using the FP0/FP1.
- * In such case we will disable the LVDS downclock
- * feature.
- */
- DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
- has_reduced_clock = 0;
- }
}
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
@@ -5914,16 +6006,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
- adjusted_mode->crtc_vsync_end -= 1;
- adjusted_mode->crtc_vsync_start -= 1;
- } else
+ I915_WRITE(VSYNCSHIFT(pipe),
+ adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2);
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ I915_WRITE(VSYNCSHIFT(pipe), 0);
+ }
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5966,12 +6059,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev, pipe);
- if (IS_GEN5(dev)) {
- /* enable address swizzle for tiling buffer */
- temp = I915_READ(DISP_ARB_CTL);
- I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
- }
-
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
@@ -6086,15 +6173,18 @@ static void ironlake_write_eld(struct drm_connector *connector,
uint32_t i;
int len;
int hdmiw_hdmiedid;
+ int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_config = IBX_AUD_CONFIG_A;
aud_cntl_st = IBX_AUD_CNTL_ST_A;
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_config = CPT_AUD_CONFIG_A;
aud_cntl_st = CPT_AUD_CNTL_ST_A;
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
@@ -6102,6 +6192,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
i = to_intel_crtc(crtc)->pipe;
hdmiw_hdmiedid += i * 0x100;
aud_cntl_st += i * 0x100;
+ aud_config += i * 0x100;
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
@@ -6121,7 +6212,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- }
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
@@ -6927,9 +7020,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
- /* Unlock panel regs */
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ assert_panel_unlocked(dev_priv, pipe);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -6938,9 +7029,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-
- /* ...and lock them again */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
/* Schedule downclock */
@@ -6970,9 +7058,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
DRM_DEBUG_DRIVER("downclocking LVDS\n");
- /* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
- PANEL_UNLOCK_REGS);
+ assert_panel_unlocked(dev_priv, pipe);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -6980,9 +7066,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-
- /* ...and lock them again */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
}
@@ -7097,7 +7180,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->old_fb_obj);
+ intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
@@ -7247,7 +7330,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
+ OUT_RING(0); /* aux display base address, unused */
ADVANCE_LP_RING();
out:
return ret;
@@ -7681,10 +7764,9 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- bool has_lvds = false;
+ bool has_lvds;
- if (IS_MOBILE(dev) && !IS_I830(dev))
- has_lvds = intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
@@ -7840,7 +7922,8 @@ int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_VYUY:
break;
default:
- DRM_ERROR("unsupported pixel format\n");
+ DRM_DEBUG_KMS("unsupported pixel format %u\n",
+ mode_cmd->pixel_format);
return -EINVAL;
}
@@ -8162,6 +8245,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
int cur_freq, min_freq, max_freq;
int i;
@@ -8173,6 +8257,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /* Clear the DBG now so we don't confuse earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
@@ -8959,8 +9050,6 @@ struct intel_quirk {
};
struct intel_quirk intel_quirks[] = {
- /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
- { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -9037,6 +9126,9 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
dev->mode_config.funcs = (void *)&intel_mode_funcs;
intel_init_quirks(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 94f860c..110552f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,7 +49,7 @@ struct intel_dp {
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
- int force_audio;
+ enum hdmi_force_audio force_audio;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
@@ -352,7 +352,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge;
+ int try, precharge = 5;
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
@@ -368,15 +368,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
+ aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
- if (IS_GEN6(dev))
- precharge = 3;
- else
- precharge = 5;
-
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
@@ -421,6 +416,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
if (status & DP_AUX_CH_CTL_DONE)
break;
}
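The 62 -> 63 divider change above is consistent with rounding the fixed 125 MHz PCH raw clock up against the roughly 2 MHz AUX bit clock the divider is meant to produce; that rationale is an editorial assumption, not stated in the patch. A minimal sketch of the round-up:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* 125 MHz input, ~2 MHz target: rounding up gives 63, so the
             * divided clock never exceeds the target rate. */
            printf("aux_clock_divider = %d\n", DIV_ROUND_UP(125, 2));
            return 0;
    }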
@@ -2117,8 +2116,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
- if (intel_dp->force_audio) {
- intel_dp->has_audio = intel_dp->force_audio > 0;
+ if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+ intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
@@ -2218,10 +2217,10 @@ intel_dp_set_property(struct drm_connector *connector,
intel_dp->force_audio = i;
- if (i == 0)
+ if (i == HDMI_AUDIO_AUTO)
has_audio = intel_dp_detect_audio(connector);
else
- has_audio = i > 0;
+ has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_dp->has_audio)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1348705..9cec6c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -374,6 +374,7 @@ extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 6eda1b5..020a7d7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -157,7 +157,6 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
C(vsync_end);
C(vtotal);
C(clock);
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 571375a..2d87669 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -152,11 +152,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 64541f7..cae3e5f 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -44,7 +44,7 @@ struct intel_hdmi {
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
- int force_audio;
+ enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
@@ -339,7 +339,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
@@ -347,8 +349,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
}
if (status == connector_status_connected) {
- if (intel_hdmi->force_audio)
- intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
+ if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+ intel_hdmi->has_audio =
+ (intel_hdmi->force_audio == HDMI_AUDIO_ON);
}
return status;
@@ -402,7 +405,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == dev_priv->force_audio_property) {
- int i = val;
+ enum hdmi_force_audio i = val;
bool has_audio;
if (i == intel_hdmi->force_audio)
@@ -410,13 +413,13 @@ intel_hdmi_set_property(struct drm_connector *connector,
intel_hdmi->force_audio = i;
- if (i == 0)
+ if (i == HDMI_AUDIO_AUTO)
has_audio = intel_hdmi_detect_audio(connector);
else
- has_audio = i > 0;
+ has_audio = (i == HDMI_AUDIO_ON);
- if (has_audio == intel_hdmi->has_audio)
- return 0;
+ if (i == HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink = 0;
intel_hdmi->has_audio = has_audio;
goto done;
@@ -514,7 +517,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->interlace_allowed = 0;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d30cccc..601c86e 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -37,7 +37,7 @@
/* Intel GPIO access functions */
-#define I2C_RISEFALL_TIME 20
+#define I2C_RISEFALL_TIME 10
static inline struct intel_gmbus *
to_intel_gmbus(struct i2c_adapter *i2c)
@@ -45,13 +45,6 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
-struct intel_gpio {
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
- struct drm_i915_private *dev_priv;
- u32 reg;
-};
-
void
intel_i2c_reset(struct drm_device *dev)
{
@@ -78,15 +71,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
I915_WRITE(DSPCLK_GATE_D, val);
}
-static u32 get_reserved(struct intel_gpio *gpio)
+static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *dev_priv = gpio->dev_priv;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ_NOTRACE(gpio->reg) &
+ reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -95,29 +88,29 @@ static u32 get_reserved(struct intel_gpio *gpio)
static int get_clock(void *data)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
- I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE_NOTRACE(gpio->reg, reserved);
- return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
- I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE_NOTRACE(gpio->reg, reserved);
- return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
u32 clock_bits;
if (state_high)
@@ -126,15 +119,15 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
- POSTING_READ(gpio->reg);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+ POSTING_READ(bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
u32 data_bits;
if (state_high)
@@ -143,13 +136,14 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
- POSTING_READ(gpio->reg);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+ POSTING_READ(bus->gpio_reg);
}
-static struct i2c_adapter *
-intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
+static bool
+intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
static const int map_pin_to_reg[] = {
0,
GPIOB,
@@ -160,65 +154,48 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
0,
GPIOF,
};
- struct intel_gpio *gpio;
+ struct i2c_algo_bit_data *algo;
if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return NULL;
+ return false;
- gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
- if (gpio == NULL)
- return NULL;
+ algo = &bus->bit_algo;
- gpio->reg = map_pin_to_reg[pin];
+ bus->gpio_reg = map_pin_to_reg[pin];
if (HAS_PCH_SPLIT(dev_priv->dev))
- gpio->reg += PCH_GPIOA - GPIOA;
- gpio->dev_priv = dev_priv;
-
- snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
- "i915 GPIO%c", "?BACDE?F"[pin]);
- gpio->adapter.owner = THIS_MODULE;
- gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
- gpio->algo.setsda = set_data;
- gpio->algo.setscl = set_clock;
- gpio->algo.getsda = get_data;
- gpio->algo.getscl = get_clock;
- gpio->algo.udelay = I2C_RISEFALL_TIME;
- gpio->algo.timeout = usecs_to_jiffies(2200);
- gpio->algo.data = gpio;
-
- if (i2c_bit_add_bus(&gpio->adapter))
- goto out_free;
-
- return &gpio->adapter;
-
-out_free:
- kfree(gpio);
- return NULL;
+ bus->gpio_reg += PCH_GPIOA - GPIOA;
+
+ bus->adapter.algo_data = algo;
+ algo->setsda = set_data;
+ algo->setscl = set_clock;
+ algo->getsda = get_data;
+ algo->getscl = get_clock;
+ algo->udelay = I2C_RISEFALL_TIME;
+ algo->timeout = usecs_to_jiffies(2200);
+ algo->data = bus;
+
+ return true;
}
static int
-intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
- struct i2c_adapter *adapter,
+intel_i2c_quirk_xfer(struct intel_gmbus *bus,
struct i2c_msg *msgs,
int num)
{
- struct intel_gpio *gpio = container_of(adapter,
- struct intel_gpio,
- adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
int ret;
intel_i2c_reset(dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
- set_data(gpio, 1);
- set_clock(gpio, 1);
+ set_data(bus, 1);
+ set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
- ret = adapter->algo->master_xfer(adapter, msgs, num);
+ ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
- set_data(gpio, 1);
- set_clock(gpio, 1);
+ set_data(bus, 1);
+ set_clock(bus, 1);
intel_i2c_quirk_set(dev_priv, false);
return ret;
@@ -232,12 +209,15 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
- struct drm_i915_private *dev_priv = adapter->algo_data;
- int i, reg_offset;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int i, reg_offset, ret;
- if (bus->force_bit)
- return intel_i2c_quirk_xfer(dev_priv,
- bus->force_bit, msgs, num);
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ if (bus->force_bit) {
+ ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ goto out;
+ }
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
@@ -249,7 +229,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (msgs[i].flags & I2C_M_RD) {
I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ GMBUS_CYCLE_WAIT |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
@@ -278,7 +259,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
- (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ GMBUS_CYCLE_WAIT |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
@@ -317,11 +299,15 @@ clear_err:
I915_WRITE(GMBUS1 + reg_offset, 0);
done:
- /* Mark the GMBUS interface as disabled. We will re-enable it at the
- * start of the next xfer, till then let it sleep.
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
*/
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
+ DRM_INFO("GMBUS timed out waiting for idle\n");
I915_WRITE(GMBUS0 + reg_offset, 0);
- return i;
+ ret = i;
+ goto out;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
@@ -329,23 +315,21 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
- if (!bus->force_bit)
- return -ENOMEM;
-
- return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+ if (!bus->has_gpio) {
+ ret = -EIO;
+ } else {
+ bus->force_bit = true;
+ ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ }
+out:
+ mutex_unlock(&dev_priv->gmbus_mutex);
+ return ret;
}
static u32 gmbus_func(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
-
- if (bus->force_bit)
- bus->force_bit->algo->functionality(bus->force_bit);
-
- return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ return i2c_bit_algo.functionality(adapter) &
+ (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
/* I2C_FUNC_10BIT_ADDR | */
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
@@ -375,11 +359,13 @@ int intel_setup_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
GFP_KERNEL);
if (dev_priv->gmbus == NULL)
return -ENOMEM;
+ mutex_init(&dev_priv->gmbus_mutex);
+
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
@@ -391,7 +377,7 @@ int intel_setup_gmbus(struct drm_device *dev)
names[i]);
bus->adapter.dev.parent = &dev->pdev->dev;
- bus->adapter.algo_data = dev_priv;
+ bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
ret = i2c_add_adapter(&bus->adapter);
@@ -401,8 +387,11 @@ int intel_setup_gmbus(struct drm_device *dev)
/* By default use a conservative clock rate */
bus->reg0 = i | GMBUS_RATE_100KHZ;
+ bus->has_gpio = intel_gpio_setup(bus, i);
+
/* XXX force bit banging until GMBUS is fully debugged */
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ if (bus->has_gpio && IS_GEN2(dev))
+ bus->force_bit = true;
}
intel_i2c_reset(dev_priv->dev);
@@ -430,19 +419,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (force_bit) {
- if (bus->force_bit == NULL) {
- struct drm_i915_private *dev_priv = adapter->algo_data;
- bus->force_bit = intel_gpio_create(dev_priv,
- bus->reg0 & 0xff);
- }
- } else {
- if (bus->force_bit) {
- i2c_del_adapter(bus->force_bit);
- kfree(bus->force_bit);
- bus->force_bit = NULL;
- }
- }
+ if (bus->has_gpio)
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
@@ -455,10 +433,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
- if (bus->force_bit) {
- i2c_del_adapter(bus->force_bit);
- kfree(bus->force_bit);
- }
i2c_del_adapter(&bus->adapter);
}
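[Editor's note] The gmbus_xfer() rework above amounts to: serialise every transfer on the new per-device mutex, try the GMBUS controller first, and on timeout flip a sticky force_bit flag so later transfers go straight to GPIO bit-banging (or fail with -EIO if the port has no GPIO pin). A standalone sketch of that control flow, with stand-in names rather than the driver's:

#include <pthread.h>
#include <stdbool.h>
#include <errno.h>

struct fake_bus {
	pthread_mutex_t lock;	/* stands in for dev_priv->gmbus_mutex */
	bool has_gpio;		/* a bit-bang pin exists for this port */
	bool force_bit;		/* sticky: use bit-banging from now on */
};

static int hw_xfer(struct fake_bus *bus)  { (void)bus; return -ETIMEDOUT; }
static int bit_xfer(struct fake_bus *bus) { (void)bus; return 0; }

static int xfer(struct fake_bus *bus)
{
	int ret;

	pthread_mutex_lock(&bus->lock);
	if (bus->force_bit) {
		ret = bit_xfer(bus);		/* GPIO bit-banging path */
		goto out;
	}

	ret = hw_xfer(bus);			/* try the GMBUS controller */
	if (ret == -ETIMEDOUT) {
		if (!bus->has_gpio) {
			ret = -EIO;		/* nothing to fall back to */
		} else {
			bus->force_bit = true;	/* remember for future transfers */
			ret = bit_xfer(bus);
		}
	}
out:
	pthread_mutex_unlock(&bus->lock);
	return ret;
}

int main(void)
{
	struct fake_bus bus = { PTHREAD_MUTEX_INITIALIZER, true, false };
	return xfer(&bus);
}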
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa84832..c5c0973 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -739,6 +739,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+ },
+ },
{ } /* terminating entry */
};
@@ -844,6 +860,18 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+ /* With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it. */
+ if (HAS_PCH_SPLIT(dev))
+ return true;
+
+ /* Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm */
+ return IS_MOBILE(dev) && !IS_I830(dev);
+}
+
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -865,6 +893,9 @@ bool intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
+ if (!intel_lvds_supported(dev))
+ return false;
+
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return false;
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index be2c6fe..d1928e7 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,6 +28,7 @@
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drv.h"
@@ -42,13 +43,13 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
u8 buf[2];
struct i2c_msg msgs[] = {
{
- .addr = 0x50,
+ .addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = 0x50,
+ .addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -83,10 +84,11 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
-static const char *force_audio_names[] = {
- "off",
- "auto",
- "on",
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
};
void
@@ -95,27 +97,24 @@ intel_attach_force_audio_property(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
- int i;
prop = dev_priv->force_audio_property;
if (prop == NULL) {
- prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ prop = drm_property_create_enum(dev, 0,
"audio",
+ force_audio_names,
ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
- drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
-
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
}
-static const char *broadcast_rgb_names[] = {
- "Full",
- "Limited 16:235",
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { 0, "Full" },
+ { 1, "Limited 16:235" },
};
void
@@ -124,19 +123,16 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
- int i;
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
- prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
+ broadcast_rgb_names,
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
- drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
-
dev_priv->broadcast_rgb_property = prop;
}
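[Editor's note] Both property hunks above replace a hand-rolled drm_property_add_enum() loop with a table handed straight to drm_property_create_enum(). The shape of that table-driven pattern, modelled standalone (the struct and helper here only imitate the DRM core ones, and print instead of registering enum entries):

#include <stdio.h>
#include <stddef.h>

/* Mirrors the shape of struct drm_prop_enum_list: value plus human name. */
struct prop_enum_list {
	int type;
	const char *name;
};

static const struct prop_enum_list force_audio_names[] = {
	{ -2, "force-dvi" },
	{ -1, "off" },
	{  0, "auto" },
	{  1, "on" },
};

/* What a create_enum()-style helper does internally: walk the table once. */
static void create_enum(const char *prop, const struct prop_enum_list *l, size_t n)
{
	size_t i;
	for (i = 0; i < n; i++)
		printf("%s: %d -> %s\n", prop, l[i].type, l[i].name);
}

int main(void)
{
	create_enum("audio", force_audio_names,
		    sizeof(force_audio_names) / sizeof(force_audio_names[0]));
	return 0;
}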
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index cdf17d4..80b331c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,8 +25,6 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-
-#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -227,7 +225,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+ true);
if (ret)
return ret;
@@ -263,7 +262,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_crtcinfo(mode, 0);
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -448,7 +447,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+ true);
if (ret)
return ret;
@@ -935,10 +935,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if (rec->dst_x < mode->crtc_hdisplay &&
- rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
- rec->dst_y < mode->crtc_vdisplay &&
- rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
+ if (rec->dst_x < mode->hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->hdisplay &&
+ rec->dst_y < mode->vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->vdisplay)
return 0;
else
return -EINVAL;
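[Editor's note] The check_overlay_dst() change only switches the comparison from the CRTC timing fields to the user-visible mode size; the bounds test itself is unchanged. As a standalone check with illustrative names:

#include <stdbool.h>

struct rect { int x, y, w, h; };

/* Destination must start inside the display and end on or inside its edge. */
static bool dst_fits(const struct rect *r, int hdisplay, int vdisplay)
{
	return r->x < hdisplay && r->x + r->w <= hdisplay &&
	       r->y < vdisplay && r->y + r->h <= vdisplay;
}

int main(void)
{
	struct rect r = { 0, 0, 1280, 720 };
	return dst_fits(&r, 1920, 1080) ? 0 : 1;
}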
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 04d79fd..230a141 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
@@ -141,8 +141,8 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
dev_priv->saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL);
- val = dev_priv->saveBLC_PWM_CTL;
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
}
} else {
val = I915_READ(BLC_PWM_CTL);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5361915..fc66af6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
-
- seqno = dev_priv->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
-
- return seqno;
-}
-
static int
render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -399,8 +385,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen > 3) {
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- if (IS_GEN6(dev) || IS_GEN7(dev))
- mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
@@ -467,7 +451,7 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
- *seqno = i915_gem_get_seqno(ring->dev);
+ *seqno = i915_gem_next_request_seqno(ring);
update_mboxes(ring, *seqno, mbox1_reg);
update_mboxes(ring, *seqno, mbox2_reg);
@@ -565,8 +549,7 @@ static int
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
- struct drm_device *dev = ring->dev;
- u32 seqno = i915_gem_get_seqno(dev);
+ u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -600,6 +583,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -617,8 +601,7 @@ static int
render_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
- struct drm_device *dev = ring->dev;
- u32 seqno = i915_gem_get_seqno(dev);
+ u32 seqno = i915_gem_next_request_seqno(ring);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -744,13 +727,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
- case RING_RENDER:
+ case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case RING_BLT:
+ case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
- case RING_BSD:
+ case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
}
@@ -792,7 +775,7 @@ ring_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- seqno = i915_gem_get_seqno(ring->dev);
+ seqno = i915_gem_next_request_seqno(ring);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -816,8 +799,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
/* It looks like we need to prevent the gt from suspending while waiting
* for a notify irq, otherwise irqs seem to get lost on at least the
* blt/bsd rings on ivb. */
- if (IS_GEN7(dev))
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
@@ -844,8 +826,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
}
spin_unlock(&ring->irq_lock);
- if (IS_GEN7(dev))
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -1127,11 +1108,93 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return 0;
}
+static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ bool was_interruptible;
+ int ret;
+
+ /* XXX As we have not yet audited all the paths to check that
+ * they are ready for ERESTARTSYS from intel_ring_begin, do not
+ * allow us to be interruptible by a signal.
+ */
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ ret = i915_wait_request(ring, seqno, true);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ if (ring->last_retired_head != -1) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ int space;
+
+ if (request->tail == -1)
+ continue;
+
+ space = request->tail - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ if (space >= n) {
+ seqno = request->seqno;
+ break;
+ }
+
+ /* Consume this request in case we need more space than
+ * is available and so need to prevent a race between
+ * updating last_retired_head and direct reads of
+ * I915_RING_HEAD. It also provides a nice sanity check.
+ */
+ request->tail = -1;
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = intel_ring_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(ring->last_retired_head == -1))
+ return -ENOSPC;
+
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (WARN_ON(ring->space < n))
+ return -ENOSPC;
+
+ return 0;
+}
+
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
trace_i915_ring_wait_begin(ring);
if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -1200,7 +1263,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .id = RING_RENDER,
+ .id = RCS,
.mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_render_ring,
@@ -1223,7 +1286,7 @@ static const struct intel_ring_buffer render_ring = {
static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .id = RING_BSD,
+ .id = VCS,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
@@ -1333,7 +1396,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
- .id = RING_BSD,
+ .id = VCS,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
@@ -1369,79 +1432,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring)
GEN6_BLITTER_USER_INTERRUPT);
}
-
-/* Workaround for some stepping of SNB,
- * each time when BLT engine ring tail moved,
- * the first command in the ring to be parsed
- * should be MI_BATCH_BUFFER_START
- */
-#define NEED_BLT_WORKAROUND(dev) \
- (IS_GEN6(dev) && (dev->pdev->revision < 8))
-
-static inline struct drm_i915_gem_object *
-to_blt_workaround(struct intel_ring_buffer *ring)
-{
- return ring->private;
-}
-
-static int blt_ring_init(struct intel_ring_buffer *ring)
-{
- if (NEED_BLT_WORKAROUND(ring->dev)) {
- struct drm_i915_gem_object *obj;
- u32 *ptr;
- int ret;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = i915_gem_object_pin(obj, 4096, true);
- if (ret) {
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
-
- ptr = kmap(obj->pages[0]);
- *ptr++ = MI_BATCH_BUFFER_END;
- *ptr++ = MI_NOOP;
- kunmap(obj->pages[0]);
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret) {
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
-
- ring->private = obj;
- }
-
- return init_ring_common(ring);
-}
-
-static int blt_ring_begin(struct intel_ring_buffer *ring,
- int num_dwords)
-{
- if (ring->private) {
- int ret = intel_ring_begin(ring, num_dwords+2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
-
- return 0;
- } else
- return intel_ring_begin(ring, 4);
-}
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
int ret;
- ret = blt_ring_begin(ring, 4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
@@ -1456,22 +1453,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-static void blt_ring_cleanup(struct intel_ring_buffer *ring)
-{
- if (!ring->private)
- return;
-
- i915_gem_object_unpin(ring->private);
- drm_gem_object_unreference(ring->private);
- ring->private = NULL;
-}
-
static const struct intel_ring_buffer gen6_blt_ring = {
.name = "blt ring",
- .id = RING_BLT,
+ .id = BCS,
.mmio_base = BLT_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = blt_ring_init,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = gen6_add_request,
@@ -1479,7 +1466,6 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .cleanup = blt_ring_cleanup,
.sync_to = gen6_blt_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
MI_SEMAPHORE_SYNC_BV,
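[Editor's note] The new intel_ring_wait_request() path above picks the oldest outstanding request whose completion frees enough ring space, rather than spinning on the hardware head pointer. The wrap-aware space arithmetic it relies on, as a standalone model (the 8-byte slack mirrors the code above; everything else is illustrative):

#include <stdio.h>

/* Free bytes gained once the GPU retires a request whose tail was at
 * req_tail, given the CPU's current emit position and the ring size.
 * The ring is circular, so a negative raw difference wraps around. */
static int space_after_retire(int req_tail, int ring_tail, int ring_size)
{
	int space = req_tail - (ring_tail + 8);	/* 8 bytes of slack, as above */
	if (space < 0)
		space += ring_size;
	return space;
}

int main(void)
{
	/* 32 KiB ring, CPU tail near the end, old request early in the ring:
	 * retiring it frees the wrapped-around region (6136 bytes here). */
	printf("%d\n", space_after_retire(4096, 30720, 32768));
	return 0;
}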
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68281c9..bc0365b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,13 +1,6 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
-enum {
- RCS = 0x0,
- VCS,
- BCS,
- I915_NUM_RINGS,
-};
-
struct intel_hw_status_page {
u32 __iomem *page_addr;
unsigned int gfx_addr;
@@ -36,10 +29,11 @@ struct intel_hw_status_page {
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
- RING_RENDER = 0x1,
- RING_BSD = 0x2,
- RING_BLT = 0x4,
+ RCS = 0x0,
+ VCS,
+ BCS,
} id;
+#define I915_NUM_RINGS 3
u32 mmio_base;
void __iomem *virtual_start;
struct drm_device *dev;
@@ -52,6 +46,16 @@ struct intel_ring_buffer {
int effective_size;
struct intel_hw_status_page status_page;
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_mask;
@@ -119,6 +123,12 @@ struct intel_ring_buffer {
void *private;
};
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+ return 1 << ring->id;
+}
+
static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
@@ -193,6 +203,11 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+{
+ return ring->tail;
+}
+
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
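[Editor's note] Because the ring ids above are now dense (RCS=0, VCS=1, BCS=2) rather than pre-shifted bit values, code that wants a bitmask derives it through intel_ring_flag(). A one-line standalone model:

#include <stdio.h>

enum ring_id { RCS = 0, VCS, BCS };	/* mirrors the new dense ids */

static unsigned ring_flag(enum ring_id id)
{
	return 1u << id;	/* the old scheme stored these shifted values directly */
}

int main(void)
{
	printf("RCS=%u VCS=%u BCS=%u\n", ring_flag(RCS), ring_flag(VCS), ring_flag(BCS));
	return 0;
}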
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e334ec3..e36b171 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -148,7 +148,7 @@ struct intel_sdvo_connector {
/* Mark the type of connector */
uint16_t output_flag;
- int force_audio;
+ enum hdmi_force_audio force_audio;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
@@ -944,7 +944,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -1310,8 +1309,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_sdvo_connector->force_audio)
- intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
+ if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+ intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
}
return status;
@@ -1684,10 +1683,10 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = i;
- if (i == 0)
+ if (i == HDMI_AUDIO_AUTO)
has_audio = intel_sdvo_detect_hdmi_audio(connector);
else
- has_audio = i > 0;
+ has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
@@ -1985,7 +1984,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
drm_connector_helper_add(&connector->base.base,
&intel_sdvo_connector_helper_funcs);
- connector->base.base.interlace_allowed = 0;
+ connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -2277,10 +2276,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_##name = data_value[0]; \
intel_sdvo_connector->cur_##name = response; \
intel_sdvo_connector->name = \
- drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
- intel_sdvo_connector->name->values[0] = 0; \
- intel_sdvo_connector->name->values[1] = data_value[0]; \
drm_connector_attach_property(connector, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
@@ -2314,25 +2311,19 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->left_margin = data_value[0] - response;
intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
intel_sdvo_connector->left =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
if (!intel_sdvo_connector->left)
return false;
- intel_sdvo_connector->left->values[0] = 0;
- intel_sdvo_connector->left->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
intel_sdvo_connector->right =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
if (!intel_sdvo_connector->right)
return false;
- intel_sdvo_connector->right->values[0] = 0;
- intel_sdvo_connector->right->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
@@ -2356,25 +2347,21 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->top_margin = data_value[0] - response;
intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
intel_sdvo_connector->top =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
if (!intel_sdvo_connector->top)
return false;
- intel_sdvo_connector->top->values[0] = 0;
- intel_sdvo_connector->top->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
intel_sdvo_connector->bottom =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
if (!intel_sdvo_connector->bottom)
return false;
- intel_sdvo_connector->bottom->values[0] = 0;
- intel_sdvo_connector->bottom->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
@@ -2403,12 +2390,10 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_dot_crawl = 1;
intel_sdvo_connector->cur_dot_crawl = response & 0x1;
intel_sdvo_connector->dot_crawl =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
if (!intel_sdvo_connector->dot_crawl)
return false;
- intel_sdvo_connector->dot_crawl->values[0] = 0;
- intel_sdvo_connector->dot_crawl->values[1] = 1;
drm_connector_attach_property(connector,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a083504..7aa0450 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -501,7 +501,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
mutex_lock(&dev->struct_mutex);
}
- i915_gem_object_unpin(old_obj);
+ intel_unpin_fb_obj(old_obj);
}
out_unlock:
@@ -528,7 +528,7 @@ intel_disable_plane(struct drm_plane *plane)
goto out;
mutex_lock(&dev->struct_mutex);
- i915_gem_object_unpin(intel_plane->obj);
+ intel_unpin_fb_obj(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 1571be3..05f765e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1240,7 +1240,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_crtcinfo(&mode, 0);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv, connector);
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 5ccb65de..507aa3d 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -403,6 +403,8 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
+ pci_set_master(dev->pdev);
+
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 9f27e3d..1a2ad7e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -14,7 +14,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
+ nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
+ nv50_fb.o nvc0_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e5cbead..8dbeeea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -65,195 +65,232 @@ static bool nv_cksum(const uint8_t *data, unsigned int length)
}
static int
-score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
+score_vbios(struct nvbios *bios, const bool writeable)
{
- if (!(data[0] == 0x55 && data[1] == 0xAA)) {
- NV_TRACEWARN(dev, "... BIOS signature not found\n");
+ if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
+ NV_TRACEWARN(bios->dev, "... BIOS signature not found\n");
return 0;
}
- if (nv_cksum(data, data[2] * 512)) {
- NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
+ if (nv_cksum(bios->data, bios->data[2] * 512)) {
+ NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n");
/* if a ro image is somewhat bad, it's probably all rubbish */
return writeable ? 2 : 1;
- } else
- NV_TRACE(dev, "... appears to be valid\n");
+ }
+ NV_TRACE(bios->dev, "... appears to be valid\n");
return 3;
}
-static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
+static void
+bios_shadow_prom(struct nvbios *bios)
{
+ struct drm_device *dev = bios->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pci_nv_20, save_pci_nv_20;
- int pcir_ptr;
+ u32 pcireg, access;
+ u16 pcir;
int i;
+ /* enable access to rom */
if (dev_priv->card_type >= NV_50)
- pci_nv_20 = 0x88050;
+ pcireg = 0x088050;
else
- pci_nv_20 = NV_PBUS_PCI_NV_20;
+ pcireg = NV_PBUS_PCI_NV_20;
+ access = nv_mask(dev, pcireg, 0x00000001, 0x00000000);
- /* enable ROM access */
- save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
- nvWriteMC(dev, pci_nv_20,
- save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+ /* bail if no rom signature, with a workaround for a PROM reading
+ * issue on some chipsets. the first read after a period of
+ * inactivity returns the wrong result, so retry the first header
+ * byte a few times before giving up as a workaround
+ */
+ i = 16;
+ do {
+ if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55)
+ break;
+ } while (i--);
- /* bail if no rom signature */
- if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
- nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
+ if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
goto out;
/* additional check (see note below) - read PCI record header */
- pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
- nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
- if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
- nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
- nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
- nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
+ pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
+ nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
+ if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R')
goto out;
- /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a
- * a good read may be obtained by waiting or re-reading (cargocult: 5x)
- * each byte. we'll hope pramin has something usable instead
- */
- for (i = 0; i < NV_PROM_SIZE; i++)
- data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
+ /* read entire bios image to system memory */
+ bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512;
+ bios->data = kmalloc(bios->length, GFP_KERNEL);
+ if (bios->data) {
+ for (i = 0; i < bios->length; i++)
+ bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
+ }
out:
- /* disable ROM access */
- nvWriteMC(dev, pci_nv_20,
- save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+ /* disable access to rom */
+ nv_wr32(dev, pcireg, access);
}
-static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
+static void
+bios_shadow_pramin(struct nvbios *bios)
{
+ struct drm_device *dev = bios->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t old_bar0_pramin = 0;
+ u32 bar0 = 0;
int i;
if (dev_priv->card_type >= NV_50) {
u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
if (!addr) {
- addr = (u64)nv_rd32(dev, 0x1700) << 16;
+ addr = (u64)nv_rd32(dev, 0x001700) << 16;
addr += 0xf0000;
}
- old_bar0_pramin = nv_rd32(dev, 0x1700);
- nv_wr32(dev, 0x1700, addr >> 16);
+ bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16);
}
/* bail if no rom signature */
- if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
+ if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 ||
nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
goto out;
- for (i = 0; i < NV_PROM_SIZE; i++)
- data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
+ bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512;
+ bios->data = kmalloc(bios->length, GFP_KERNEL);
+ if (bios->data) {
+ for (i = 0; i < bios->length; i++)
+ bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
+ }
out:
if (dev_priv->card_type >= NV_50)
- nv_wr32(dev, 0x1700, old_bar0_pramin);
+ nv_wr32(dev, 0x001700, bar0);
}
-static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
+static void
+bios_shadow_pci(struct nvbios *bios)
+{
+ struct pci_dev *pdev = bios->dev->pdev;
+ size_t length;
+
+ if (!pci_enable_rom(pdev)) {
+ void __iomem *rom = pci_map_rom(pdev, &length);
+ if (rom) {
+ bios->data = kmalloc(length, GFP_KERNEL);
+ if (bios->data) {
+ memcpy_fromio(bios->data, rom, length);
+ bios->length = length;
+ }
+ pci_unmap_rom(pdev, rom);
+ }
+
+ pci_disable_rom(pdev);
+ }
+}
+
+static void
+bios_shadow_acpi(struct nvbios *bios)
{
- void __iomem *rom = NULL;
- size_t rom_len;
- int ret;
+ struct pci_dev *pdev = bios->dev->pdev;
+ int ptr, len, ret;
+ u8 data[3];
- ret = pci_enable_rom(dev->pdev);
- if (ret)
+ if (!nouveau_acpi_rom_supported(pdev))
return;
- rom = pci_map_rom(dev->pdev, &rom_len);
- if (!rom)
- goto out;
- memcpy_fromio(data, rom, rom_len);
- pci_unmap_rom(dev->pdev, rom);
+ ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
+ if (ret != sizeof(data))
+ return;
-out:
- pci_disable_rom(dev->pdev);
-}
+ bios->length = min(data[2] * 512, 65536);
+ bios->data = kmalloc(bios->length, GFP_KERNEL);
+ if (!bios->data)
+ return;
-static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
-{
- int i;
- int ret;
- int size = 64 * 1024;
+ len = bios->length;
+ ptr = 0;
+ while (len) {
+ int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
- if (!nouveau_acpi_rom_supported(dev->pdev))
- return;
+ ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
+ if (ret != size) {
+ kfree(bios->data);
+ bios->data = NULL;
+ return;
+ }
- for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
- ret = nouveau_acpi_get_bios_chunk(data,
- (i * ROM_BIOS_PAGE),
- ROM_BIOS_PAGE);
- if (ret <= 0)
- break;
+ len -= size;
+ ptr += size;
}
- return;
}
struct methods {
const char desc[8];
- void (*loadbios)(struct drm_device *, uint8_t *);
+ void (*shadow)(struct nvbios *);
const bool rw;
+ int score;
+ u32 size;
+ u8 *data;
};
-static struct methods shadow_methods[] = {
- { "PRAMIN", load_vbios_pramin, true },
- { "PROM", load_vbios_prom, false },
- { "PCIROM", load_vbios_pci, true },
- { "ACPI", load_vbios_acpi, true },
-};
-#define NUM_SHADOW_METHODS ARRAY_SIZE(shadow_methods)
-
-static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
-{
- struct methods *methods = shadow_methods;
- int testscore = 3;
- int scores[NUM_SHADOW_METHODS], i;
+static bool
+bios_shadow(struct drm_device *dev)
+{
+ struct methods shadow_methods[] = {
+ { "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL },
+ { "PROM", bios_shadow_prom, false, 0, 0, NULL },
+ { "ACPI", bios_shadow_acpi, true, 0, 0, NULL },
+ { "PCIROM", bios_shadow_pci, true, 0, 0, NULL },
+ {}
+ };
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct methods *mthd, *best;
if (nouveau_vbios) {
- for (i = 0; i < NUM_SHADOW_METHODS; i++)
- if (!strcasecmp(nouveau_vbios, methods[i].desc))
- break;
-
- if (i < NUM_SHADOW_METHODS) {
- NV_INFO(dev, "Attempting to use BIOS image from %s\n",
- methods[i].desc);
+ mthd = shadow_methods;
+ do {
+ if (strcasecmp(nouveau_vbios, mthd->desc))
+ continue;
+ NV_INFO(dev, "VBIOS source: %s\n", mthd->desc);
- methods[i].loadbios(dev, data);
- if (score_vbios(dev, data, methods[i].rw))
+ mthd->shadow(bios);
+ mthd->score = score_vbios(bios, mthd->rw);
+ if (mthd->score)
return true;
- }
+ } while ((++mthd)->shadow);
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
- for (i = 0; i < NUM_SHADOW_METHODS; i++) {
- NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
- methods[i].desc);
- data[0] = data[1] = 0; /* avoid reuse of previous image */
- methods[i].loadbios(dev, data);
- scores[i] = score_vbios(dev, data, methods[i].rw);
- if (scores[i] == testscore)
- return true;
- }
-
- while (--testscore > 0) {
- for (i = 0; i < NUM_SHADOW_METHODS; i++) {
- if (scores[i] == testscore) {
- NV_TRACE(dev, "Using BIOS image from %s\n",
- methods[i].desc);
- methods[i].loadbios(dev, data);
- return true;
- }
+ mthd = shadow_methods;
+ do {
+ NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc);
+ mthd->shadow(bios);
+ mthd->score = score_vbios(bios, mthd->rw);
+ mthd->size = bios->length;
+ mthd->data = bios->data;
+ } while (mthd->score != 3 && (++mthd)->shadow);
+
+ mthd = shadow_methods;
+ best = mthd;
+ do {
+ if (mthd->score > best->score) {
+ kfree(best->data);
+ best = mthd;
}
+ } while ((++mthd)->shadow);
+
+ if (best->score) {
+ NV_TRACE(dev, "Using VBIOS from %s\n", best->desc);
+ bios->length = best->size;
+ bios->data = best->data;
+ return true;
}
- NV_ERROR(dev, "No valid BIOS image found\n");
+ NV_ERROR(dev, "No valid VBIOS image found\n");
return false;
}
@@ -6334,11 +6371,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
spin_lock_init(&bios->lock);
bios->dev = dev;
- if (!NVShadowVBIOS(dev, bios->data))
- return false;
-
- bios->length = NV_PROM_SIZE;
- return true;
+ return bios_shadow(dev);
}
static int nouveau_parse_vbios_struct(struct drm_device *dev)
@@ -6498,6 +6531,10 @@ nouveau_bios_init(struct drm_device *dev)
void
nouveau_bios_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
nouveau_mxm_fini(dev);
nouveau_i2c_fini(dev);
+
+ kfree(dev_priv->vbios.data);
}
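[Editor's note] bios_shadow() above turns the old retry-by-score search inside out: each method records a score (0 = no image, 3 = signature and checksum good) together with its shadow copy, the scan stops early on a perfect score, and afterwards only the best copy is kept while the losing copies are freed. A standalone model of that selection — the method names are taken from the diff, the scores and data are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct method {
	const char *desc;
	int score;	/* 0 = no image, 3 = signature + checksum ok */
	char *data;	/* this method's shadow copy, if any */
};

int main(void)
{
	struct method methods[] = {
		{ "PRAMIN", 1, strdup("partial") },
		{ "PROM",   3, strdup("good") },
		{ "ACPI",   0, NULL },
		{ "PCIROM", 2, strdup("writable-but-bad") },
	};
	size_t i, n = sizeof(methods) / sizeof(methods[0]);
	struct method *best = &methods[0];

	for (i = 1; i < n; i++) {
		if (methods[i].score > best->score) {
			free(best->data);	/* drop the losing copy */
			best = &methods[i];
		} else {
			free(methods[i].data);
		}
	}

	if (best->score)
		printf("Using VBIOS from %s\n", best->desc);
	free(best->data);
	return 0;
}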
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index a37c31e..1f3233d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -75,6 +75,8 @@ enum dcb_connector_type {
DCB_CONNECTOR_eDP = 0x47,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_DMS59_DP0 = 0x64,
+ DCB_CONNECTOR_DMS59_DP1 = 0x65,
DCB_CONNECTOR_NONE = 0xff
};
@@ -209,6 +211,8 @@ struct nvbios {
NVBIOS_BIT
} type;
uint16_t offset;
+ uint32_t length;
+ uint8_t *data;
uint8_t chip_version;
@@ -219,8 +223,6 @@ struct nvbios {
spinlock_t lock;
- uint8_t data[NV_PROM_SIZE];
- unsigned int length;
bool execute;
uint8_t major_version;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index f3ce34b..9f9d50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -519,6 +519,19 @@ nouveau_connector_set_property(struct drm_connector *connector,
return nv_crtc->set_dither(nv_crtc, true);
}
+ if (nv_crtc && nv_crtc->set_color_vibrance) {
+ /* Hue */
+ if (property == disp->vibrant_hue_property) {
+ nv_crtc->vibrant_hue = value - 90;
+ return nv_crtc->set_color_vibrance(nv_crtc, true);
+ }
+ /* Saturation */
+ if (property == disp->color_vibrance_property) {
+ nv_crtc->color_vibrance = value - 100;
+ return nv_crtc->set_color_vibrance(nv_crtc, true);
+ }
+ }
+
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
return get_slave_funcs(encoder)->set_property(
encoder, connector, property, value);
@@ -858,6 +871,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
case DCB_CONNECTOR_LVDS :
case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
@@ -1002,7 +1017,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
nv_connector->type == DCB_CONNECTOR_DVI_I ||
nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
- nv_connector->type == DCB_CONNECTOR_DP)) {
+ nv_connector->type == DCB_CONNECTOR_DP ||
+ nv_connector->type == DCB_CONNECTOR_DMS59_DP0 ||
+ nv_connector->type == DCB_CONNECTOR_DMS59_DP1)) {
drm_connector_attach_property(connector,
disp->underscan_property,
UNDERSCAN_OFF);
@@ -1014,6 +1031,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
0);
}
+ /* Add hue and saturation options */
+ if (disp->vibrant_hue_property)
+ drm_connector_attach_property(connector,
+ disp->vibrant_hue_property,
+ 90);
+ if (disp->color_vibrance_property)
+ drm_connector_attach_property(connector,
+ disp->color_vibrance_property,
+ 150);
+
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (dev_priv->card_type >= NV_50) {
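[Editor's note] The two new connector properties above are plain unsigned ranges with the signed setting recovered by a fixed offset: hue 0..180 maps to -90..+90 and vibrance 0..200 maps to -100..+100, so the attach defaults of 90 and 150 decode to 0 and +50. A tiny standalone decode:

#include <stdio.h>

/* Property values are unsigned; the driver re-centres them around zero. */
static int decode_hue(unsigned v)      { return (int)v - 90;  }	/* 0..180 -> -90..+90 */
static int decode_vibrance(unsigned v) { return (int)v - 100; }	/* 0..200 -> -100..+100 */

int main(void)
{
	printf("defaults: hue=%d vibrance=%d\n", decode_hue(90), decode_vibrance(150));
	return 0;
}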
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 686f6b4..e6d0d1e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -35,6 +35,8 @@ struct nouveau_crtc {
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
+ int color_vibrance;
+ int vibrant_hue;
int sharpness;
int last_dpms;
@@ -67,6 +69,7 @@ struct nouveau_crtc {
int (*set_dither)(struct nouveau_crtc *crtc, bool update);
int (*set_scale)(struct nouveau_crtc *crtc, bool update);
+ int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
};
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 795a9e3..c01ae78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -155,20 +155,20 @@ static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
};
-struct drm_prop_enum_list {
+struct nouveau_drm_prop_enum_list {
u8 gen_mask;
int type;
char *name;
};
-static struct drm_prop_enum_list underscan[] = {
+static struct nouveau_drm_prop_enum_list underscan[] = {
{ 6, UNDERSCAN_AUTO, "auto" },
{ 6, UNDERSCAN_OFF, "off" },
{ 6, UNDERSCAN_ON, "on" },
{}
};
-static struct drm_prop_enum_list dither_mode[] = {
+static struct nouveau_drm_prop_enum_list dither_mode[] = {
{ 7, DITHERING_MODE_AUTO, "auto" },
{ 7, DITHERING_MODE_OFF, "off" },
{ 1, DITHERING_MODE_ON, "on" },
@@ -178,7 +178,7 @@ static struct drm_prop_enum_list dither_mode[] = {
{}
};
-static struct drm_prop_enum_list dither_depth[] = {
+static struct nouveau_drm_prop_enum_list dither_depth[] = {
{ 6, DITHERING_DEPTH_AUTO, "auto" },
{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
@@ -186,7 +186,7 @@ static struct drm_prop_enum_list dither_depth[] = {
};
#define PROP_ENUM(p,gen,n,list) do { \
- struct drm_prop_enum_list *l = (list); \
+ struct nouveau_drm_prop_enum_list *l = (list); \
int c = 0; \
while (l->gen_mask) { \
if (l->gen_mask & (1 << (gen))) \
@@ -281,16 +281,24 @@ nouveau_display_create(struct drm_device *dev)
PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
disp->underscan_hborder_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "underscan hborder", 2);
- disp->underscan_hborder_property->values[0] = 0;
- disp->underscan_hborder_property->values[1] = 128;
+ drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
disp->underscan_vborder_property =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "underscan vborder", 2);
- disp->underscan_vborder_property->values[0] = 0;
- disp->underscan_vborder_property->values[1] = 128;
+ drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
+
+ if (gen == 1) {
+ disp->vibrant_hue_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "vibrant hue", 2);
+ disp->vibrant_hue_property->values[0] = 0;
+ disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
+
+ disp->color_vibrance_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "color vibrance", 2);
+ disp->color_vibrance_property->values[0] = 0;
+ disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
+ }
dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
@@ -309,6 +317,9 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
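[Editor's note] The underscan border conversion above follows the same pattern as the i915 and SDVO hunks earlier in this diff: the new drm_property_create_range() helper folds the create-then-fill-values[] dance into one call. Modelled standalone below — the struct and helper only imitate the DRM core ones, they are not the real API:

#include <stdio.h>
#include <stdlib.h>

struct range_prop {
	const char *name;
	unsigned long long values[2];	/* [0] = min, [1] = max */
};

/* What the new helper amounts to: allocate and fill min/max in one call,
 * instead of creating the property and poking values[0]/values[1] by hand. */
static struct range_prop *create_range(const char *name,
				       unsigned long long min,
				       unsigned long long max)
{
	struct range_prop *p = malloc(sizeof(*p));
	if (!p)
		return NULL;
	p->name = name;
	p->values[0] = min;
	p->values[1] = max;
	return p;
}

int main(void)
{
	struct range_prop *p = create_range("underscan hborder", 0, 128);
	if (!p)
		return 1;
	printf("%s: %llu..%llu\n", p->name, p->values[0], p->values[1]);
	free(p);
	return 0;
}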
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9b93b70..302b2f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -161,116 +161,6 @@ out:
return ret;
}
-static u32
-dp_link_bw_get(struct drm_device *dev, int or, int link)
-{
- u32 ctrl = nv_rd32(dev, 0x614300 + (or * 0x800));
- if (!(ctrl & 0x000c0000))
- return 162000;
- return 270000;
-}
-
-static int
-dp_lane_count_get(struct drm_device *dev, int or, int link)
-{
- u32 ctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
- switch (ctrl & 0x000f0000) {
- case 0x00010000: return 1;
- case 0x00030000: return 2;
- default:
- return 4;
- }
-}
-
-void
-nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
- const u32 symbol = 100000;
- int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
- int TU, VTUi, VTUf, VTUa;
- u64 link_data_rate, link_ratio, unk;
- u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, r;
-
- /* calculate packed data rate for each lane */
- link_nr = dp_lane_count_get(dev, or, link);
- link_data_rate = (clk * bpp / 8) / link_nr;
-
- /* calculate ratio of packed data rate to link symbol rate */
- link_bw = dp_link_bw_get(dev, or, link);
- link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
-
- for (TU = 64; TU >= 32; TU--) {
- /* calculate average number of valid symbols in each TU */
- u32 tu_valid = link_ratio * TU;
- u32 calc, diff;
-
- /* find a hw representation for the fraction.. */
- VTUi = tu_valid / symbol;
- calc = VTUi * symbol;
- diff = tu_valid - calc;
- if (diff) {
- if (diff >= (symbol / 2)) {
- VTUf = symbol / (symbol - diff);
- if (symbol - (VTUf * diff))
- VTUf++;
-
- if (VTUf <= 15) {
- VTUa = 1;
- calc += symbol - (symbol / VTUf);
- } else {
- VTUa = 0;
- VTUf = 1;
- calc += symbol;
- }
- } else {
- VTUa = 0;
- VTUf = min((int)(symbol / diff), 15);
- calc += symbol / VTUf;
- }
-
- diff = calc - tu_valid;
- } else {
- /* no remainder, but the hw doesn't like the fractional
- * part to be zero. decrement the integer part and
- * have the fraction add a whole symbol back
- */
- VTUa = 0;
- VTUf = 1;
- VTUi--;
- }
-
- if (diff < best_diff) {
- best_diff = diff;
- bestTU = TU;
- bestVTUa = VTUa;
- bestVTUf = VTUf;
- bestVTUi = VTUi;
- if (diff == 0)
- break;
- }
- }
-
- if (!bestTU) {
- NV_ERROR(dev, "DP: unable to find suitable config\n");
- return;
- }
-
- /* XXX close to vbios numbers, but not right */
- unk = (symbol - link_ratio) * bestTU;
- unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
- unk += 6;
-
- nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
- nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
- bestVTUf << 16 |
- bestVTUi << 8 |
- unk);
-}
-
u8 *
nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
{
@@ -318,13 +208,10 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
* link training
*****************************************************************************/
struct dp_state {
+ struct dp_train_func *func;
struct dcb_entry *dcb;
- u8 *table;
- u8 *entry;
int auxch;
int crtc;
- int or;
- int link;
u8 *dpcd;
int link_nr;
u32 link_bw;
@@ -335,142 +222,58 @@ struct dp_state {
static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
- int or = dp->or, link = dp->link;
- u8 *entry, sink[2];
- u32 dp_ctrl;
- u16 script;
+ u8 sink[2];
NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
- /* set selected link rate on source */
- switch (dp->link_bw) {
- case 270000:
- nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00040000);
- sink[0] = DP_LINK_BW_2_7;
- break;
- default:
- nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00000000);
- sink[0] = DP_LINK_BW_1_62;
- break;
- }
-
- /* offset +0x0a of each dp encoder table entry is a pointer to another
- * table, that has (among other things) pointers to more scripts that
- * need to be executed, this time depending on link speed.
- */
- entry = ROMPTR(dev, dp->entry[10]);
- if (entry) {
- if (dp->table[0] < 0x30) {
- while (dp->link_bw < (ROM16(entry[0]) * 10))
- entry += 4;
- script = ROM16(entry[2]);
- } else {
- while (dp->link_bw < (entry[0] * 27000))
- entry += 3;
- script = ROM16(entry[1]);
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
- }
+ /* set desired link configuration on the source */
+ dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
+ dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
- /* configure lane count on the source */
- dp_ctrl = ((1 << dp->link_nr) - 1) << 16;
+ /* inform the sink of the new configuration */
+ sink[0] = dp->link_bw / 27000;
sink[1] = dp->link_nr;
- if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) {
- dp_ctrl |= 0x00004000;
+ if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
-
- nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x001f4000, dp_ctrl);
- /* inform the sink of the new configuration */
auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2);
}
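
The rewritten dp_set_link_config() no longer touches SOR registers directly; it asks the per-chipset link_set() hook to do that and only informs the sink over DPCD. Dividing the driver's rate values (162000 or 270000, as printed by the debug message above) by 27000 yields exactly the DPCD DP_LINK_BW_SET codes. A tiny check of that identity:

#include <assert.h>
#include <stdio.h>

/* DPCD link-rate codes, as defined in drm_dp_helper.h */
#define DP_LINK_BW_1_62	0x06
#define DP_LINK_BW_2_7	0x0a

int main(void)
{
	/* the divide in dp_set_link_config() maps the driver's rate values
	 * directly onto the DPCD encoding */
	assert(162000 / 27000 == DP_LINK_BW_1_62);
	assert(270000 / 27000 == DP_LINK_BW_2_7);
	printf("1.62Gbps -> 0x%02x, 2.7Gbps -> 0x%02x\n",
	       162000 / 27000, 270000 / 27000);
	return 0;
}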
static void
-dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 tp)
+dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
{
u8 sink_tp;
- NV_DEBUG_KMS(dev, "training pattern %d\n", tp);
+ NV_DEBUG_KMS(dev, "training pattern %d\n", pattern);
- nv_mask(dev, NV50_SOR_DP_CTRL(dp->or, dp->link), 0x0f000000, tp << 24);
+ dp->func->train_set(dev, dp->dcb, pattern);
auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
sink_tp &= ~DP_TRAINING_PATTERN_MASK;
- sink_tp |= tp;
+ sink_tp |= pattern;
auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
}
-static const u8 nv50_lane_map[] = { 16, 8, 0, 24 };
-static const u8 nvaf_lane_map[] = { 24, 16, 8, 0 };
-
static int
dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mask = 0, drv = 0, pre = 0, unk = 0;
- const u8 *shifts;
- int link = dp->link;
- int or = dp->or;
int i;
- if (dev_priv->chipset != 0xaf)
- shifts = nv50_lane_map;
- else
- shifts = nvaf_lane_map;
-
for (i = 0; i < dp->link_nr; i++) {
- u8 *conf = dp->entry + dp->table[4];
u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
u8 lpre = (lane & 0x0c) >> 2;
u8 lvsw = (lane & 0x03) >> 0;
- mask |= 0xff << shifts[i];
- unk |= 1 << (shifts[i] >> 3);
-
dp->conf[i] = (lpre << 3) | lvsw;
if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
- if (lpre == DP_TRAIN_PRE_EMPHASIS_9_5)
+ if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]);
-
- if (dp->table[0] < 0x30) {
- u8 *last = conf + (dp->entry[4] * dp->table[5]);
- while (lvsw != conf[0] || lpre != conf[1]) {
- conf += dp->table[5];
- if (conf >= last)
- return -EINVAL;
- }
-
- conf += 2;
- } else {
- /* no lookup table anymore, set entries for each
- * combination of voltage swing and pre-emphasis
- * level allowed by the DP spec.
- */
- switch (lvsw) {
- case 0: lpre += 0; break;
- case 1: lpre += 4; break;
- case 2: lpre += 7; break;
- case 3: lpre += 9; break;
- }
-
- conf = conf + (lpre * dp->table[5]);
- conf++;
- }
-
- drv |= conf[0] << shifts[i];
- pre |= conf[1] << shifts[i];
- unk = (unk & ~0x0000ff00) | (conf[2] << 8);
+ dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
}
- nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, drv);
- nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, pre);
- nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff0f, unk);
-
return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4);
}
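
dp_link_train_commit() reads the sink's per-lane adjust requests (cached in dp->stat[4..5]) and unpacks one 4-bit field per lane: the low two bits are the requested voltage swing, the next two the requested pre-emphasis, which are then passed to the chipset's train_adj() hook. A standalone sketch of that unpacking, with made-up status bytes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical DPCD ADJUST_REQUEST_LANE0_1 / LANE2_3 bytes */
	uint8_t stat[2] = { 0x65, 0x21 };
	int i;

	for (i = 0; i < 4; i++) {
		uint8_t lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
		uint8_t lpre = (lane & 0x0c) >> 2;	/* requested pre-emphasis */
		uint8_t lvsw = (lane & 0x03) >> 0;	/* requested voltage swing */

		printf("lane %d: swing %u, pre-emphasis %u\n", i, lvsw, lpre);
	}
	return 0;
}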
@@ -554,8 +357,50 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
return eq_done ? 0 : -1;
}
+static void
+dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+{
+ u16 script = 0x0000;
+ u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
+ if (table) {
+ if (table[0] >= 0x20 && table[0] <= 0x30) {
+ if (enable) script = ROM16(entry[12]);
+ else script = ROM16(entry[14]);
+ }
+ }
+
+ nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+}
+
+static void
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
+{
+ u16 script = 0x0000;
+ u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
+ if (table) {
+ if (table[0] >= 0x20 && table[0] <= 0x30)
+ script = ROM16(entry[6]);
+ }
+
+ nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+}
+
+static void
+dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
+{
+ u16 script = 0x0000;
+ u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
+ if (table) {
+ if (table[0] >= 0x20 && table[0] <= 0x30)
+ script = ROM16(entry[8]);
+ }
+
+ nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+}
+
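
The three new helpers differ only in which 16-bit script pointer they take from the DP table entry: +6 for the pre-train script, +8 for the post-train script, +12/+14 for down-spreading on/off. If they were ever consolidated, a single helper parameterized by offset would do; the sketch below is not from the patch and only demonstrates the offset/ROM16 arithmetic on a fake entry (constant names invented).

#include <stdio.h>
#include <stdint.h>

/* stand-in for the vbios ROM16() accessor: little-endian 16-bit read */
static uint16_t rom16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

/* script-pointer offsets in a version 0x20..0x30 DP table entry, as used
 * by the helpers above (enum names are invented for this sketch) */
enum {
	DP_ENT_SCRIPT_INIT  = 6,
	DP_ENT_SCRIPT_FINI  = 8,
	DP_ENT_SCRIPT_SSON  = 12,
	DP_ENT_SCRIPT_SSOFF = 14,
};

int main(void)
{
	/* hypothetical table entry; only the script pointers are filled in */
	uint8_t entry[16] = { 0 };

	entry[DP_ENT_SCRIPT_INIT]     = 0x34;	/* pre-train script  0x1234 */
	entry[DP_ENT_SCRIPT_INIT + 1] = 0x12;
	entry[DP_ENT_SCRIPT_FINI]     = 0x78;	/* post-train script 0x5678 */
	entry[DP_ENT_SCRIPT_FINI + 1] = 0x56;

	printf("init script %04x, fini script %04x\n",
	       rom16(&entry[DP_ENT_SCRIPT_INIT]),
	       rom16(&entry[DP_ENT_SCRIPT_FINI]));
	return 0;
}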
bool
-nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
+nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
+ struct dp_train_func *func)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -571,17 +416,15 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
if (!auxch)
return false;
- dp.table = nouveau_dp_bios_data(dev, nv_encoder->dcb, &dp.entry);
- if (!dp.table)
- return -EINVAL;
-
+ dp.func = func;
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
dp.auxch = auxch->drive;
- dp.or = nv_encoder->or;
- dp.link = !(nv_encoder->dcb->sorconf.link & 1);
dp.dpcd = nv_encoder->dp.dpcd;
+ /* adjust required bandwidth for 8B/10B coding overhead */
+ datarate = (datarate / 8) * 10;
+
/* some sinks toggle hotplug in response to some of the actions
* we take during link training (DP_SET_POWER is one), we need
* to ignore them for the moment to avoid races.
@@ -589,16 +432,10 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
/* enable down-spreading, if possible */
- if (dp.table[1] >= 16) {
- u16 script = ROM16(dp.entry[14]);
- if (nv_encoder->dp.dpcd[3] & 1)
- script = ROM16(dp.entry[12]);
-
- nouveau_bios_run_init_table(dev, script, dp.dcb, dp.crtc);
- }
+ dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
/* execute pre-train script from vbios */
- nouveau_bios_run_init_table(dev, ROM16(dp.entry[6]), dp.dcb, dp.crtc);
+ dp_link_train_init(dev, &dp);
/* start off at highest link rate supported by encoder and display */
while (*link_bw > nv_encoder->dp.link_bw)
@@ -632,13 +469,36 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
/* execute post-train script from vbios */
- nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
+ dp_link_train_fini(dev, &dp);
/* re-enable hotplug detect */
nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
return true;
}
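
The bandwidth adjustment near the top of nouveau_dp_link_train() accounts for 8b/10b line coding: each payload byte occupies ten bits on the wire, so the raw data rate is scaled by 10/8 before the loop above decides which lane count and link rate can carry it. Worked numbers, with an illustrative pixel clock and bpp (the exact units cancel out of the scaling):

#include <stdio.h>

int main(void)
{
	/* e.g. a 108000 kHz pixel clock at 24 bpp, expressed the same way
	 * the caller hands datarate to the training code */
	unsigned int datarate = 108000 * (24 / 8);	/* 324000 */
	unsigned int coded = (datarate / 8) * 10;	/* 405000 after 8b/10b */

	printf("payload %u -> %u with 8b/10b overhead\n", datarate, coded);
	return 0;
}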
+void
+nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
+ struct dp_train_func *func)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ u8 status;
+
+ auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ status = DP_SET_POWER_D0;
+ else
+ status = DP_SET_POWER_D3;
+
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ nouveau_dp_link_train(encoder, datarate, func);
+}
+
bool
nouveau_dp_detect(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 81d7962..4f2030b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -57,6 +57,10 @@ MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
int nouveau_vram_notify = 0;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
+MODULE_PARM_DESC(vram_type, "Override detected VRAM type");
+char *nouveau_vram_type;
+module_param_named(vram_type, nouveau_vram_type, charp, 0400);
+
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
@@ -89,7 +93,7 @@ MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
int nouveau_override_conntype = 0;
module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
-MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
@@ -104,27 +108,27 @@ module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
- "\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
+ "\t\t0x100 vgaattr, 0x200 EVO (G80+)");
int nouveau_reg_debug;
module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
char *nouveau_perflvl;
module_param_named(perflvl, nouveau_perflvl, charp, 0400);
-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
int nouveau_perflvl_wr;
module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
-MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
int nouveau_msi;
module_param_named(msi, nouveau_msi, int, 0400);
-MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
+MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)");
int nouveau_ctxfw;
module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
-MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS\n");
+MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS");
int nouveau_mxmdcb = 1;
module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b827098..a184ba3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -406,6 +406,9 @@ struct nouveau_display_engine {
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
+ /* not really hue and saturation: */
+ struct drm_property *vibrant_hue_property;
+ struct drm_property *color_vibrance_property;
};
struct nouveau_gpio_engine {
@@ -432,58 +435,85 @@ struct nouveau_pm_voltage {
int nr_level;
};
+/* Exclusive upper limits */
+#define NV_MEM_CL_DDR2_MAX 8
+#define NV_MEM_WR_DDR2_MAX 9
+#define NV_MEM_CL_DDR3_MAX 17
+#define NV_MEM_WR_DDR3_MAX 17
+#define NV_MEM_CL_GDDR3_MAX 16
+#define NV_MEM_WR_GDDR3_MAX 18
+#define NV_MEM_CL_GDDR5_MAX 21
+#define NV_MEM_WR_GDDR5_MAX 20
+
struct nouveau_pm_memtiming {
int id;
- u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */
- u32 reg_1;
- u32 reg_2;
- u32 reg_3;
- u32 reg_4;
- u32 reg_5;
- u32 reg_6;
- u32 reg_7;
- u32 reg_8;
- /* To be written to 0x1002c0 */
- u8 CL;
- u8 WR;
+
+ u32 reg[9];
+ u32 mr[4];
+
+ u8 tCWL;
+
+ u8 odt;
+ u8 drive_strength;
};
-struct nouveau_pm_tbl_header{
+struct nouveau_pm_tbl_header {
u8 version;
u8 header_len;
u8 entry_cnt;
u8 entry_len;
};
-struct nouveau_pm_tbl_entry{
+struct nouveau_pm_tbl_entry {
u8 tWR;
- u8 tUNK_1;
+ u8 tWTR;
u8 tCL;
- u8 tRP; /* Byte 3 */
+ u8 tRC;
u8 empty_4;
- u8 tRAS; /* Byte 5 */
+ u8 tRFC; /* Byte 5 */
u8 empty_6;
- u8 tRFC; /* Byte 7 */
+ u8 tRAS; /* Byte 7 */
u8 empty_8;
- u8 tRC; /* Byte 9 */
- u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
- u8 empty_15,empty_16,empty_17;
- u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 tRP; /* Byte 9 */
+ u8 tRCDRD;
+ u8 tRCDWR;
+ u8 tRRD;
+ u8 tUNK_13;
+ u8 RAM_FT1; /* 14, a bitmask of random RAM features */
+ u8 empty_15;
+ u8 tUNK_16;
+ u8 empty_17;
+ u8 tUNK_18;
+ u8 tCWL;
+ u8 tUNK_20, tUNK_21;
};
-/* nouveau_mem.c */
-void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
- struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
- struct nouveau_pm_memtiming *timing);
+struct nouveau_pm_profile;
+struct nouveau_pm_profile_func {
+ void (*destroy)(struct nouveau_pm_profile *);
+ void (*init)(struct nouveau_pm_profile *);
+ void (*fini)(struct nouveau_pm_profile *);
+ struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
+};
+
+struct nouveau_pm_profile {
+ const struct nouveau_pm_profile_func *func;
+ struct list_head head;
+ char name[8];
+};
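
struct nouveau_pm_profile is an embedded handle plus an ops table: a provider wraps it in its own structure, points func at its callbacks, and recovers its private data with container_of() inside them. The sketch below re-declares the two structures in userspace purely to show the pattern; the "fixed" provider and its behaviour are invented for illustration, not taken from the driver.

#include <stdio.h>
#include <stddef.h>

/* minimal stand-ins for the kernel types used by the pattern */
struct list_head { struct list_head *next, *prev; };
struct nouveau_pm_level { int id; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nouveau_pm_profile;
struct nouveau_pm_profile_func {
	void (*destroy)(struct nouveau_pm_profile *);
	void (*init)(struct nouveau_pm_profile *);
	void (*fini)(struct nouveau_pm_profile *);
	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
};
struct nouveau_pm_profile {
	const struct nouveau_pm_profile_func *func;
	struct list_head head;
	char name[8];
};

/* hypothetical provider: always selects one fixed perflvl */
struct fixed_profile {
	struct nouveau_pm_profile base;
	struct nouveau_pm_level *lvl;
};

static struct nouveau_pm_level *
fixed_profile_select(struct nouveau_pm_profile *profile)
{
	struct fixed_profile *fixed =
		container_of(profile, struct fixed_profile, base);
	return fixed->lvl;
}

static const struct nouveau_pm_profile_func fixed_profile_func = {
	.select = fixed_profile_select,
};

int main(void)
{
	struct nouveau_pm_level lvl = { .id = 3 };
	struct fixed_profile fixed = {
		.base = { .func = &fixed_profile_func, .name = "fixed" },
		.lvl = &lvl,
	};

	printf("profile '%s' selects perflvl %d\n", fixed.base.name,
	       fixed.base.func->select(&fixed.base)->id);
	return 0;
}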
#define NOUVEAU_PM_MAX_LEVEL 8
struct nouveau_pm_level {
+ struct nouveau_pm_profile profile;
struct device_attribute dev_attr;
char name[32];
int id;
- u32 core;
+ struct nouveau_pm_memtiming timing;
u32 memory;
+ u16 memscript;
+
+ u32 core;
u32 shader;
u32 rop;
u32 copy;
@@ -498,9 +528,6 @@ struct nouveau_pm_level {
u32 volt_min; /* microvolts */
u32 volt_max;
u8 fanspeed;
-
- u16 memscript;
- struct nouveau_pm_memtiming *timing;
};
struct nouveau_pm_temp_sensor_constants {
@@ -517,27 +544,26 @@ struct nouveau_pm_threshold_temp {
s16 fan_boost;
};
-struct nouveau_pm_memtimings {
- bool supported;
- struct nouveau_pm_memtiming *timing;
- int nr_timing;
-};
-
struct nouveau_pm_fan {
+ u32 percent;
u32 min_duty;
u32 max_duty;
u32 pwm_freq;
+ u32 pwm_divisor;
};
struct nouveau_pm_engine {
struct nouveau_pm_voltage voltage;
struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
int nr_perflvl;
- struct nouveau_pm_memtimings memtimings;
struct nouveau_pm_temp_sensor_constants sensor_constants;
struct nouveau_pm_threshold_temp threshold_temp;
struct nouveau_pm_fan fan;
- u32 pwm_divisor;
+
+ struct nouveau_pm_profile *profile_ac;
+ struct nouveau_pm_profile *profile_dc;
+ struct nouveau_pm_profile *profile;
+ struct list_head profiles;
struct nouveau_pm_level boot;
struct nouveau_pm_level *cur;
@@ -669,14 +695,14 @@ struct nv04_mode_state {
};
enum nouveau_card_type {
- NV_04 = 0x00,
+ NV_04 = 0x04,
NV_10 = 0x10,
NV_20 = 0x20,
NV_30 = 0x30,
NV_40 = 0x40,
NV_50 = 0x50,
NV_C0 = 0xc0,
- NV_D0 = 0xd0
+ NV_D0 = 0xd0,
};
struct drm_nouveau_private {
@@ -772,8 +798,22 @@ struct drm_nouveau_private {
} tile;
/* VRAM/fb configuration */
+ enum {
+ NV_MEM_TYPE_UNKNOWN = 0,
+ NV_MEM_TYPE_STOLEN,
+ NV_MEM_TYPE_SGRAM,
+ NV_MEM_TYPE_SDRAM,
+ NV_MEM_TYPE_DDR1,
+ NV_MEM_TYPE_DDR2,
+ NV_MEM_TYPE_DDR3,
+ NV_MEM_TYPE_GDDR2,
+ NV_MEM_TYPE_GDDR3,
+ NV_MEM_TYPE_GDDR4,
+ NV_MEM_TYPE_GDDR5
+ } vram_type;
uint64_t vram_size;
uint64_t vram_sys_base;
+ bool vram_rank_B;
uint64_t fb_available_size;
uint64_t fb_mappable_pages;
@@ -846,6 +886,7 @@ extern int nouveau_uscript_lvds;
extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
+extern char *nouveau_vram_type;
extern int nouveau_fbpercrtc;
extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
@@ -894,8 +935,12 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
-extern int nouveau_mem_detect(struct drm_device *);
extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
+ struct nouveau_pm_memtiming *);
+extern void nouveau_mem_timing_read(struct drm_device *,
+ struct nouveau_pm_memtiming *);
+extern int nouveau_mem_vbios_type(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(
struct drm_device *dev, uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags);
@@ -1117,19 +1162,14 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
/* nouveau_hdmi.c */
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
-/* nouveau_dp.c */
-int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
- uint8_t *data, int data_nr);
-bool nouveau_dp_detect(struct drm_encoder *);
-bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate);
-void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
-
/* nv04_fb.c */
+extern int nv04_fb_vram_init(struct drm_device *);
extern int nv04_fb_init(struct drm_device *);
extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
+extern int nv10_fb_vram_init(struct drm_device *dev);
+extern int nv1a_fb_vram_init(struct drm_device *dev);
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
@@ -1138,6 +1178,16 @@ extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
+/* nv20_fb.c */
+extern int nv20_fb_vram_init(struct drm_device *dev);
+extern int nv20_fb_init(struct drm_device *);
+extern void nv20_fb_takedown(struct drm_device *);
+extern void nv20_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv20_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv20_fb_free_tile_region(struct drm_device *dev, int i);
+
/* nv30_fb.c */
extern int nv30_fb_init(struct drm_device *);
extern void nv30_fb_takedown(struct drm_device *);
@@ -1147,6 +1197,7 @@ extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
/* nv40_fb.c */
+extern int nv40_fb_vram_init(struct drm_device *dev);
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
@@ -1703,6 +1754,7 @@ nv44_graph_class(struct drm_device *dev)
#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
#define NV_MEM_ACCESS_SYS 4
#define NV_MEM_ACCESS_VM 8
+#define NV_MEM_ACCESS_NOSNOOP 16
#define NV_MEM_TARGET_VRAM 0
#define NV_MEM_TARGET_PCI 1
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e5d6e3f..3dc14a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -32,6 +32,14 @@
#define NV_DPMS_CLEARED 0x80
+struct dp_train_func {
+ void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
+ int nr, u32 bw, bool enhframe);
+ void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern);
+ void (*train_adj)(struct drm_device *, struct dcb_entry *,
+ u8 lane, u8 swing, u8 preem);
+};
+
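
struct dp_train_func is how the now hardware-agnostic training code in nouveau_dp.c calls back into the output driver: the caller fills in link_set/train_set/train_adj and hands the table to nouveau_dp_link_train() or nouveau_dp_dpms(). The following userspace sketch mirrors the structure with opaque stand-in types and logging stubs, just to show the wiring; the stub bodies are invented and not the driver's implementations.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* opaque stand-ins for the kernel types; illustration only */
struct drm_device { int unused; };
struct dcb_entry { int unused; };

struct dp_train_func {
	void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
			 int nr, uint32_t bw, bool enhframe);
	void (*train_set)(struct drm_device *, struct dcb_entry *, uint8_t pattern);
	void (*train_adj)(struct drm_device *, struct dcb_entry *,
			  uint8_t lane, uint8_t swing, uint8_t preem);
};

/* hypothetical hooks that only log what the common code asked for */
static void demo_link_set(struct drm_device *dev, struct dcb_entry *dcb,
			  int crtc, int nr, uint32_t bw, bool enhframe)
{
	printf("link_set: crtc %d, %d lanes @ %u, enhanced framing %d\n",
	       crtc, nr, bw, enhframe);
}

static void demo_train_set(struct drm_device *dev, struct dcb_entry *dcb,
			   uint8_t pattern)
{
	printf("train_set: pattern %u\n", pattern);
}

static void demo_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
			   uint8_t lane, uint8_t swing, uint8_t preem)
{
	printf("train_adj: lane %u swing %u preem %u\n", lane, swing, preem);
}

static const struct dp_train_func demo_func = {
	.link_set  = demo_link_set,
	.train_set = demo_train_set,
	.train_adj = demo_train_adj,
};

int main(void)
{
	struct drm_device dev = { 0 };
	struct dcb_entry dcb = { 0 };

	/* in the driver this table would be passed as
	 * nouveau_dp_link_train(encoder, datarate, &demo_func) */
	demo_func.link_set(&dev, &dcb, 0, 4, 270000, true);
	demo_func.train_set(&dev, &dcb, 1);
	demo_func.train_adj(&dev, &dcb, 0, 0, 0);
	return 0;
}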
struct nouveau_encoder {
struct drm_encoder_slave base;
@@ -78,9 +86,19 @@ get_slave_funcs(struct drm_encoder *enc)
return to_encoder_slave(enc)->slave_funcs;
}
+/* nouveau_dp.c */
+int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr);
+bool nouveau_dp_detect(struct drm_encoder *);
+void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
+ struct dp_train_func *);
+u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
+
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
+void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
+
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9892218..8113e92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -381,11 +381,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
goto out_unref;
}
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 820ae7f..8f4f914 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -277,7 +277,7 @@ i2c_bit_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-const struct i2c_algorithm i2c_bit_algo = {
+const struct i2c_algorithm nouveau_i2c_bit_algo = {
.master_xfer = i2c_bit_xfer,
.functionality = i2c_bit_func
};
@@ -384,12 +384,12 @@ nouveau_i2c_init(struct drm_device *dev)
case 0: /* NV04:NV50 */
port->drive = entry[0];
port->sense = entry[1];
- port->adapter.algo = &i2c_bit_algo;
+ port->adapter.algo = &nouveau_i2c_bit_algo;
break;
case 4: /* NV4E */
port->drive = 0x600800 + entry[1];
port->sense = port->drive;
- port->adapter.algo = &i2c_bit_algo;
+ port->adapter.algo = &nouveau_i2c_bit_algo;
break;
case 5: /* NV50- */
port->drive = entry[0] & 0x0f;
@@ -402,7 +402,7 @@ nouveau_i2c_init(struct drm_device *dev)
port->drive = 0x00d014 + (port->drive * 0x20);
port->sense = port->drive;
}
- port->adapter.algo = &i2c_bit_algo;
+ port->adapter.algo = &nouveau_i2c_bit_algo;
break;
case 6: /* NV50- DP AUX */
port->drive = entry[0];
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c3a5745..b08065f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -26,7 +26,8 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors:
- * Keith Whitwell <keith@tungstengraphics.com>
+ * Ben Skeggs <bskeggs@redhat.com>
+ * Roy Spliet <r.spliet@student.tudelft.nl>
*/
@@ -192,75 +193,6 @@ nouveau_mem_gart_fini(struct drm_device *dev)
}
}
-static uint32_t
-nouveau_mem_detect_nv04(struct drm_device *dev)
-{
- uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
-
- if (boot0 & 0x00000100)
- return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
-
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- return 32 * 1024 * 1024;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- return 16 * 1024 * 1024;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- return 8 * 1024 * 1024;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- return 4 * 1024 * 1024;
- }
-
- return 0;
-}
-
-static uint32_t
-nouveau_mem_detect_nforce(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *bridge;
- uint32_t mem;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- NV_ERROR(dev, "no bridge device\n");
- return 0;
- }
-
- if (dev_priv->flags & NV_NFORCE) {
- pci_read_config_dword(bridge, 0x7C, &mem);
- return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
- } else
- if (dev_priv->flags & NV_NFORCE2) {
- pci_read_config_dword(bridge, 0x84, &mem);
- return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
- }
-
- NV_ERROR(dev, "impossible!\n");
- return 0;
-}
-
-int
-nouveau_mem_detect(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->card_type == NV_04) {
- dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
- } else
- if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
- dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
- } else
- if (dev_priv->card_type < NV_50) {
- dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- }
-
- if (dev_priv->vram_size)
- return 0;
- return -ENOMEM;
-}
-
bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
@@ -385,11 +317,29 @@ nouveau_mem_init_agp(struct drm_device *dev)
return 0;
}
+static const struct vram_types {
+ int value;
+ const char *name;
+} vram_type_map[] = {
+ { NV_MEM_TYPE_STOLEN , "stolen system memory" },
+ { NV_MEM_TYPE_SGRAM , "SGRAM" },
+ { NV_MEM_TYPE_SDRAM , "SDRAM" },
+ { NV_MEM_TYPE_DDR1 , "DDR1" },
+ { NV_MEM_TYPE_DDR2 , "DDR2" },
+ { NV_MEM_TYPE_DDR3 , "DDR3" },
+ { NV_MEM_TYPE_GDDR2 , "GDDR2" },
+ { NV_MEM_TYPE_GDDR3 , "GDDR3" },
+ { NV_MEM_TYPE_GDDR4 , "GDDR4" },
+ { NV_MEM_TYPE_GDDR5 , "GDDR5" },
+ { NV_MEM_TYPE_UNKNOWN, "unknown type" }
+};
+
int
nouveau_mem_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ const struct vram_types *vram_type;
int ret, dma_bits;
dma_bits = 32;
@@ -427,7 +377,21 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+ vram_type = vram_type_map;
+ while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
+ if (nouveau_vram_type) {
+ if (!strcasecmp(nouveau_vram_type, vram_type->name))
+ break;
+ dev_priv->vram_type = vram_type->value;
+ } else {
+ if (vram_type->value == dev_priv->vram_type)
+ break;
+ }
+ vram_type++;
+ }
+
+ NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
+ (int)(dev_priv->vram_size >> 20), vram_type->name);
if (dev_priv->vram_sys_base) {
NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
dev_priv->vram_sys_base);
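
With the new charp module parameter declared in nouveau_drv.c above, the detected VRAM type can be overridden by name, e.g. nouveau.vram_type=GDDR3 on the kernel command line (or modprobe nouveau vram_type=GDDR3); the loop here walks vram_type_map case-insensitively until the name matches. A userspace sketch of the same table walk, with the map trimmed to two entries (the numeric values follow the NV_MEM_TYPE_* enum added to nouveau_drv.h earlier in this patch):

#include <stdio.h>
#include <strings.h>	/* strcasecmp */

enum {
	NV_MEM_TYPE_UNKNOWN = 0,
	NV_MEM_TYPE_DDR2    = 5,
	NV_MEM_TYPE_GDDR3   = 8,
};

static const struct vram_types {
	int value;
	const char *name;
} vram_type_map[] = {
	{ NV_MEM_TYPE_DDR2,    "DDR2" },
	{ NV_MEM_TYPE_GDDR3,   "GDDR3" },
	{ NV_MEM_TYPE_UNKNOWN, "unknown type" },
};

int main(void)
{
	const char *override = "gddr3";	/* e.g. nouveau.vram_type=gddr3 */
	const struct vram_types *t = vram_type_map;

	while (t->value != NV_MEM_TYPE_UNKNOWN && strcasecmp(override, t->name))
		t++;

	printf("override '%s' -> %s (%d)\n", override, t->name, t->value);
	return 0;
}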
@@ -508,216 +472,617 @@ nouveau_mem_gart_init(struct drm_device *dev)
return 0;
}
-/* XXX: For now a dummy. More samples required, possibly even a card
- * Called from nouveau_perf.c */
-void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
- struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
- struct nouveau_pm_memtiming *timing) {
-
- NV_DEBUG(dev,"Timing entry format unknown, please contact nouveau developers");
-}
-
-void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
- struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
- struct nouveau_pm_memtiming *timing) {
-
- timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
+static int
+nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
+ t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
/* XXX: I don't trust the -1's and +1's... they must come
* from somewhere! */
- timing->reg_1 = (e->tWR + 2 + magic_number) << 24 |
- 1 << 16 |
- (e->tUNK_1 + 2 + magic_number) << 8 |
- (e->tCL + 2 - magic_number);
- timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
- timing->reg_2 |= 0x20200000;
-
- NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id,
- timing->reg_0, timing->reg_1,timing->reg_2);
+ t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
+ 1 << 16 |
+ (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
+ (e->tCL + 2 - (t->tCWL - 1));
+
+ t->reg[2] = 0x20200000 |
+ ((t->tCWL - 1) << 24 |
+ e->tRRD << 16 |
+ e->tRCDWR << 8 |
+ e->tRCDRD);
+
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
+ t->reg[0], t->reg[1], t->reg[2]);
+ return 0;
}
-void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr,
- struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) {
+static int
+nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct bit_entry P;
+ uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
- uint8_t unk18 = 1,
- unk19 = 1,
- unk20 = 0,
- unk21 = 0;
+ if (bit_table(dev, 'P', &P))
+ return -EINVAL;
- switch (min(hdr->entry_len, (u8) 22)) {
+ switch (min(len, (u8) 22)) {
case 22:
unk21 = e->tUNK_21;
case 21:
unk20 = e->tUNK_20;
case 20:
- unk19 = e->tUNK_19;
+ if (e->tCWL > 0)
+ t->tCWL = e->tCWL;
case 19:
unk18 = e->tUNK_18;
break;
}
- timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
+ t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
- /* XXX: I don't trust the -1's and +1's... they must come
- * from somewhere! */
- timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 |
- max(unk18, (u8) 1) << 16 |
- (e->tUNK_1 + unk19 + 1 + magic_number) << 8;
- if (dev_priv->chipset == 0xa8) {
- timing->reg_1 |= (e->tCL - 1);
- } else {
- timing->reg_1 |= (e->tCL + 2 - magic_number);
- }
- timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
-
- timing->reg_5 = (e->tRAS << 24 | e->tRC);
- timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16;
-
- if (P->version == 1) {
- timing->reg_2 |= magic_number << 24;
- timing->reg_3 = (0x14 + e->tCL) << 24 |
- 0x16 << 16 |
- (e->tCL - 1) << 8 |
- (e->tCL - 1);
- timing->reg_4 = (nv_rd32(dev,0x10022c) & 0xffff0000) | e->tUNK_13 << 8 | e->tUNK_13;
- timing->reg_5 |= (e->tCL + 2) << 8;
- timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16;
+ t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
+ max(unk18, (u8) 1) << 16 |
+ (e->tWTR + 2 + (t->tCWL - 1)) << 8;
+
+ t->reg[2] = ((t->tCWL - 1) << 24 |
+ e->tRRD << 16 |
+ e->tRCDWR << 8 |
+ e->tRCDRD);
+
+ t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;
+
+ t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
+
+ t->reg[8] = boot->reg[8] & 0xffffff00;
+
+ if (P.version == 1) {
+ t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
+
+ t->reg[3] = (0x14 + e->tCL) << 24 |
+ 0x16 << 16 |
+ (e->tCL - 1) << 8 |
+ (e->tCL - 1);
+
+ t->reg[4] |= boot->reg[4] & 0xffff0000;
+
+ t->reg[6] = (0x33 - t->tCWL) << 16 |
+ t->tCWL << 8 |
+ (0x2e + e->tCL - t->tCWL);
+
+ t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
+
+ /* XXX: P.version == 1 only has DDR2 and GDDR3? */
+ if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
+ t->reg[5] |= (e->tCL + 3) << 8;
+ t->reg[6] |= (t->tCWL - 2) << 8;
+ t->reg[8] |= (e->tCL - 4);
+ } else {
+ t->reg[5] |= (e->tCL + 2) << 8;
+ t->reg[6] |= t->tCWL << 8;
+ t->reg[8] |= (e->tCL - 2);
+ }
} else {
- timing->reg_2 |= (unk19 - 1) << 24;
- /* XXX: reg_10022c for recentish cards pretty much unknown*/
- timing->reg_3 = e->tCL - 1;
- timing->reg_4 = (unk20 << 24 | unk21 << 16 |
- e->tUNK_13 << 8 | e->tUNK_13);
+ t->reg[1] |= (5 + e->tCL - (t->tCWL));
+
+ /* XXX: 0xb? 0x30? */
+ t->reg[3] = (0x30 + e->tCL) << 24 |
+ (boot->reg[3] & 0x00ff0000)|
+ (0xb + e->tCL) << 8 |
+ (e->tCL - 1);
+
+ t->reg[4] |= (unk20 << 24 | unk21 << 16);
+
/* XXX: +6? */
- timing->reg_5 |= (unk19 + 6) << 8;
+ t->reg[5] |= (t->tCWL + 6) << 8;
- /* XXX: reg_10023c currently unknown
- * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
- timing->reg_7 = 0x202;
+ t->reg[6] = (0x5a + e->tCL) << 16 |
+ (6 - e->tCL + t->tCWL) << 8 |
+ (0x50 + e->tCL - t->tCWL);
+
+ tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
+ t->reg[7] = (tmp7_3 << 24) |
+ ((tmp7_3 - 6 + e->tCL) << 16) |
+ 0x202;
}
- NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id,
- timing->reg_0, timing->reg_1,
- timing->reg_2, timing->reg_3);
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
+ t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
- timing->reg_4, timing->reg_5,
- timing->reg_6, timing->reg_7);
- NV_DEBUG(dev, " 240: %08x\n", timing->reg_8);
-}
-
-void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
- struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) {
- timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP);
- timing->reg_1 = (nv_rd32(dev,0x10f294) & 0xff000000) | (e->tUNK_11&0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f);
- timing->reg_2 = (nv_rd32(dev,0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8;
- timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13;
- timing->reg_4 = (nv_rd32(dev,0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15;
- NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id,
- timing->reg_0, timing->reg_1,
- timing->reg_2, timing->reg_3);
- NV_DEBUG(dev, " 2a0: %08x %08x %08x %08x\n",
- timing->reg_4, timing->reg_5,
- timing->reg_6, timing->reg_7);
+ t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
+ NV_DEBUG(dev, " 240: %08x\n", t->reg[8]);
+ return 0;
+}
+
+static int
+nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
+ if (e->tCWL > 0)
+ t->tCWL = e->tCWL;
+
+ t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
+ e->tRFC << 8 | e->tRC);
+
+ t->reg[1] = (boot->reg[1] & 0xff000000) |
+ (e->tRCDWR & 0x0f) << 20 |
+ (e->tRCDRD & 0x0f) << 14 |
+ (t->tCWL << 7) |
+ (e->tCL & 0x0f);
+
+ t->reg[2] = (boot->reg[2] & 0xff0000ff) |
+ e->tWR << 16 | e->tWTR << 8;
+
+ t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
+ (e->tUNK_21 & 0xf) << 5 |
+ (e->tUNK_13 & 0x1f);
+
+ t->reg[4] = (boot->reg[4] & 0xfff00fff) |
+ (e->tRRD&0x1f) << 15;
+
+ NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
+ t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
+ NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]);
+ return 0;
}
/**
- * Processes the Memory Timing BIOS table, stores generated
- * register values
- * @pre init scripts were run, memtiming regs are initialized
+ * MR generation methods
*/
-void
-nouveau_mem_timing_init(struct drm_device *dev)
+
+static int
+nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
+ t->drive_strength = 0;
+ if (len < 15) {
+ t->odt = boot->odt;
+ } else {
+ t->odt = e->RAM_FT1 & 0x07;
+ }
+
+ if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
+ NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+ return -ERANGE;
+ }
+
+ if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
+ NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+ return -ERANGE;
+ }
+
+ if (t->odt > 3) {
+ NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
+ t->id, t->odt);
+ t->odt = 0;
+ }
+
+ t->mr[0] = (boot->mr[0] & 0x100f) |
+ (e->tCL) << 4 |
+ (e->tWR - 1) << 9;
+ t->mr[1] = (boot->mr[1] & 0x101fbb) |
+ (t->odt & 0x1) << 2 |
+ (t->odt & 0x2) << 5;
+
+ NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
+ return 0;
+}
+
+uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
+ 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
+
+static int
+nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
+ u8 cl = e->tCL - 4;
+
+ t->drive_strength = 0;
+ if (len < 15) {
+ t->odt = boot->odt;
+ } else {
+ t->odt = e->RAM_FT1 & 0x07;
+ }
+
+ if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
+ NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+ return -ERANGE;
+ }
+
+ if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
+ NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+ return -ERANGE;
+ }
+
+ if (e->tCWL < 5) {
+ NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
+ return -ERANGE;
+ }
+
+ t->mr[0] = (boot->mr[0] & 0x180b) |
+ /* CAS */
+ (cl & 0x7) << 4 |
+ (cl & 0x8) >> 1 |
+ (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
+ t->mr[1] = (boot->mr[1] & 0x101dbb) |
+ (t->odt & 0x1) << 2 |
+ (t->odt & 0x2) << 5 |
+ (t->odt & 0x4) << 7;
+ t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
+
+ NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
+ return 0;
+}
+
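
For DDR3 the CAS-latency field of MR0 is split across the address bits: three bits at A6:A4 plus a fourth at A2, with the value encoded as tCL - 4. That is what the shifts above do: (cl & 0x7) << 4 places the low bits, (cl & 0x8) >> 1 places the high bit at A2. Worked example:

#include <stdio.h>
#include <stdint.h>

/* DDR3 MR0 CAS-latency packing used by nouveau_mem_ddr3_mr() above */
static uint32_t ddr3_mr0_cas(uint8_t tCL)
{
	uint8_t cl = tCL - 4;

	return ((cl & 0x7) << 4) | ((cl & 0x8) >> 1);
}

int main(void)
{
	printf("CL11 -> MR0 CAS bits 0x%02x\n", ddr3_mr0_cas(11)); /* 0x70 */
	printf("CL12 -> MR0 CAS bits 0x%02x\n", ddr3_mr0_cas(12)); /* 0x04 */
	return 0;
}

The CWL field is handled the same way in mr[2], where (tCWL - 5) lands in bits 5:3.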
+uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
+ 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
+uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
+ 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
+
+static int
+nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
+ if (len < 15) {
+ t->drive_strength = boot->drive_strength;
+ t->odt = boot->odt;
+ } else {
+ t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
+ t->odt = e->RAM_FT1 & 0x07;
+ }
+
+ if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
+ NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+ return -ERANGE;
+ }
+
+ if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
+ NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+ return -ERANGE;
+ }
+
+ if (t->odt > 3) {
+ NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
+ t->id, t->odt);
+ t->odt = 0;
+ }
+
+ t->mr[0] = (boot->mr[0] & 0xe0b) |
+ /* CAS */
+ ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
+ ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
+ t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
+ (t->odt << 2) |
+ (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
+ t->mr[2] = boot->mr[2];
+
+ NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
+ t->mr[0], t->mr[1], t->mr[2]);
+ return 0;
+}
+
+static int
+nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_tbl_entry *e, u8 len,
+ struct nouveau_pm_memtiming *boot,
+ struct nouveau_pm_memtiming *t)
+{
+ if (len < 15) {
+ t->drive_strength = boot->drive_strength;
+ t->odt = boot->odt;
+ } else {
+ t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
+ t->odt = e->RAM_FT1 & 0x03;
+ }
+
+ if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
+ NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+ return -ERANGE;
+ }
+
+ if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
+ NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+ return -ERANGE;
+ }
+
+ if (t->odt > 3) {
+ NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
+ t->id, t->odt);
+ t->odt = 0;
+ }
+
+ t->mr[0] = (boot->mr[0] & 0x007) |
+ ((e->tCL - 5) << 3) |
+ ((e->tWR - 4) << 8);
+ t->mr[1] = (boot->mr[1] & 0x1007f0) |
+ t->drive_strength |
+ (t->odt << 2);
+
+ NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
+ return 0;
+}
+
+int
+nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
+ struct nouveau_pm_memtiming *t)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
- struct nvbios *bios = &dev_priv->vbios;
- struct bit_entry P;
- struct nouveau_pm_tbl_header *hdr = NULL;
- uint8_t magic_number;
- u8 *entry;
- int i;
+ struct nouveau_pm_memtiming *boot = &pm->boot.timing;
+ struct nouveau_pm_tbl_entry *e;
+ u8 ver, len, *ptr, *ramcfg;
+ int ret;
+
+ ptr = nouveau_perf_timing(dev, freq, &ver, &len);
+ if (!ptr || ptr[0] == 0x00) {
+ *t = *boot;
+ return 0;
+ }
+ e = (struct nouveau_pm_tbl_entry *)ptr;
+
+ t->tCWL = boot->tCWL;
+
+ switch (dev_priv->card_type) {
+ case NV_40:
+ ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
+ break;
+ case NV_50:
+ ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
+ break;
+ case NV_C0:
+ ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
- if (bios->type == NVBIOS_BIT) {
- if (bit_table(dev, 'P', &P))
- return;
+ switch (dev_priv->vram_type * !ret) {
+ case NV_MEM_TYPE_GDDR3:
+ ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
+ break;
+ case NV_MEM_TYPE_GDDR5:
+ ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
+ break;
+ case NV_MEM_TYPE_DDR2:
+ ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
+ break;
+ case NV_MEM_TYPE_DDR3:
+ ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
+ if (ramcfg) {
+ int dll_off;
- if (P.version == 1)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
+ if (ver == 0x00)
+ dll_off = !!(ramcfg[3] & 0x04);
else
- if (P.version == 2)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
- else {
- NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+ dll_off = !!(ramcfg[2] & 0x40);
+
+ switch (dev_priv->vram_type) {
+ case NV_MEM_TYPE_GDDR3:
+ t->mr[1] &= ~0x00000040;
+ t->mr[1] |= 0x00000040 * dll_off;
+ break;
+ default:
+ t->mr[1] &= ~0x00000001;
+ t->mr[1] |= 0x00000001 * dll_off;
+ break;
}
+ }
+
+ return ret;
+}
+
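
A reading note on the dispatcher above: switch (dev_priv->vram_type * !ret) multiplies the memory type by !ret, so any error from the per-chipset timing calculation collapses the switch value to 0 (NV_MEM_TYPE_UNKNOWN) and falls through to the default branch instead of generating mode-register values from a bad timing set. The idiom in isolation:

#include <stdio.h>

int main(void)
{
	int vram_type = 6;	/* some non-zero memory type */
	int ret_bad = -19;	/* pretend the timing calc failed (-ENODEV) */
	int ret_ok = 0;

	/* a non-zero ret collapses the switch value to 0, a zero ret keeps it */
	printf("ret=%d -> switch on %d\n", ret_bad, vram_type * !ret_bad);
	printf("ret=%d  -> switch on %d\n", ret_ok, vram_type * !ret_ok);
	return 0;
}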
+void
+nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 timing_base, timing_regs, mr_base;
+ int i;
+
+ if (dev_priv->card_type >= 0xC0) {
+ timing_base = 0x10f290;
+ mr_base = 0x10f300;
} else {
- NV_DEBUG(dev, "BMP version too old for memory\n");
- return;
+ timing_base = 0x100220;
+ mr_base = 0x1002c0;
}
- if (!hdr) {
- NV_DEBUG(dev, "memory timing table pointer invalid\n");
+ t->id = -1;
+
+ switch (dev_priv->card_type) {
+ case NV_50:
+ timing_regs = 9;
+ break;
+ case NV_C0:
+ case NV_D0:
+ timing_regs = 5;
+ break;
+ case NV_30:
+ case NV_40:
+ timing_regs = 3;
+ break;
+ default:
+ timing_regs = 0;
return;
}
+ for(i = 0; i < timing_regs; i++)
+ t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));
+
+ t->tCWL = 0;
+ if (dev_priv->card_type < NV_C0) {
+ t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
+ } else if (dev_priv->card_type <= NV_D0) {
+ t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
+ }
- if (hdr->version != 0x10) {
- NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version);
- return;
+ t->mr[0] = nv_rd32(dev, mr_base);
+ t->mr[1] = nv_rd32(dev, mr_base + 0x04);
+ t->mr[2] = nv_rd32(dev, mr_base + 0x20);
+ t->mr[3] = nv_rd32(dev, mr_base + 0x24);
+
+ t->odt = 0;
+ t->drive_strength = 0;
+
+ switch (dev_priv->vram_type) {
+ case NV_MEM_TYPE_DDR3:
+ t->odt |= (t->mr[1] & 0x200) >> 7;
+ case NV_MEM_TYPE_DDR2:
+ t->odt |= (t->mr[1] & 0x04) >> 2 |
+ (t->mr[1] & 0x40) >> 5;
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ case NV_MEM_TYPE_GDDR5:
+ t->drive_strength = t->mr[1] & 0x03;
+ t->odt = (t->mr[1] & 0x0c) >> 2;
+ break;
+ default:
+ break;
}
+}
- /* validate record length */
- if (hdr->entry_len < 15) {
- NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len);
- return;
+int
+nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
+ struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+ struct nouveau_pm_memtiming *info = &perflvl->timing;
+ u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
+ u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
+ u32 mr1_dlloff;
+
+ switch (dev_priv->vram_type) {
+ case NV_MEM_TYPE_DDR2:
+ tDLLK = 2000;
+ mr1_dlloff = 0x00000001;
+ break;
+ case NV_MEM_TYPE_DDR3:
+ tDLLK = 12000;
+ mr1_dlloff = 0x00000001;
+ break;
+ case NV_MEM_TYPE_GDDR3:
+ tDLLK = 40000;
+ mr1_dlloff = 0x00000040;
+ break;
+ default:
+ NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
+ return -ENODEV;
}
- /* parse vbios entries into common format */
- memtimings->timing =
- kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL);
- if (!memtimings->timing)
- return;
+ /* fetch current MRs */
+ switch (dev_priv->vram_type) {
+ case NV_MEM_TYPE_GDDR3:
+ case NV_MEM_TYPE_DDR3:
+ mr[2] = exec->mrg(exec, 2);
+ default:
+ mr[1] = exec->mrg(exec, 1);
+ mr[0] = exec->mrg(exec, 0);
+ break;
+ }
- /* Get "some number" from the timing reg for NV_40 and NV_50
- * Used in calculations later... source unknown */
- magic_number = 0;
- if (P.version == 1) {
- magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
+ /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
+ if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
+ exec->precharge(exec);
+ exec->mrs (exec, 1, mr[1] | mr1_dlloff);
+ exec->wait(exec, tMRD);
}
- entry = (u8*) hdr + hdr->header_len;
- for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
- struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
- if (entry[0] == 0)
- continue;
+ /* enter self-refresh mode */
+ exec->precharge(exec);
+ exec->refresh(exec);
+ exec->refresh(exec);
+ exec->refresh_auto(exec, false);
+ exec->refresh_self(exec, true);
+ exec->wait(exec, tCKSRE);
+
+ /* modify input clock frequency */
+ exec->clock_set(exec);
+
+ /* exit self-refresh mode */
+ exec->wait(exec, tCKSRX);
+ exec->precharge(exec);
+ exec->refresh_self(exec, false);
+ exec->refresh_auto(exec, true);
+ exec->wait(exec, tXS);
+
+ /* update MRs */
+ if (mr[2] != info->mr[2]) {
+ exec->mrs (exec, 2, info->mr[2]);
+ exec->wait(exec, tMRD);
+ }
+
+ if (mr[1] != info->mr[1]) {
+ /* need to keep DLL off until later, at least on GDDR3 */
+ exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
+ exec->wait(exec, tMRD);
+ }
+
+ if (mr[0] != info->mr[0]) {
+ exec->mrs (exec, 0, info->mr[0]);
+ exec->wait(exec, tMRD);
+ }
- timing->id = i;
- timing->WR = entry[0];
- timing->CL = entry[2];
+ /* update PFB timing registers */
+ exec->timing_set(exec);
- if(dev_priv->card_type <= NV_40) {
- nv40_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
- } else if(dev_priv->card_type == NV_50){
- nv50_mem_timing_entry(dev,&P,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
- } else if(dev_priv->card_type == NV_C0) {
- nvc0_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,&pm->memtimings.timing[i]);
+ /* DLL (enable + ) reset */
+ if (!(info->mr[1] & mr1_dlloff)) {
+ if (mr[1] & mr1_dlloff) {
+ exec->mrs (exec, 1, info->mr[1]);
+ exec->wait(exec, tMRD);
}
+ exec->mrs (exec, 0, info->mr[0] | 0x00000100);
+ exec->wait(exec, tMRD);
+ exec->mrs (exec, 0, info->mr[0] | 0x00000000);
+ exec->wait(exec, tMRD);
+ exec->wait(exec, tDLLK);
+ if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
+ exec->precharge(exec);
}
- memtimings->nr_timing = hdr->entry_cnt;
- memtimings->supported = P.version == 1;
+ return 0;
}
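
nouveau_mem_exec() drives a reclock as a fixed sequence against callbacks supplied by the chipset code: precharge and refresh control, MR reads/writes, the clock switch and the timing-register update. The struct below is only a userspace mirror of the hooks used above (the real nouveau_mem_exec_func is defined elsewhere in the driver and is not part of this hunk), with logging stubs so the ordering, minus the DLL/ODT details, can be seen at a glance.

#include <stdio.h>
#include <stdint.h>

struct mem_exec_func {
	void (*precharge)(void);
	void (*refresh)(void);
	void (*refresh_self)(int enable);
	void (*clock_set)(void);
	void (*timing_set)(void);
	void (*mrs)(int mr, uint32_t val);
	void (*wait)(uint32_t nsec);
};

static void log_precharge(void)          { puts("precharge all banks"); }
static void log_refresh(void)            { puts("manual refresh"); }
static void log_refresh_self(int en)     { printf("self-refresh %s\n", en ? "on" : "off"); }
static void log_clock_set(void)          { puts("switch memory PLL"); }
static void log_timing_set(void)         { puts("write PFB timing regs"); }
static void log_mrs(int mr, uint32_t v)  { printf("MRS %d <- %08x\n", mr, v); }
static void log_wait(uint32_t nsec)      { printf("wait %u ns\n", nsec); }

int main(void)
{
	struct mem_exec_func exec = {
		log_precharge, log_refresh, log_refresh_self,
		log_clock_set, log_timing_set, log_mrs, log_wait,
	};
	uint32_t mr0 = 0x00000b52;	/* made-up target mode register */

	/* same shape as the sequence above */
	exec.precharge();
	exec.refresh();
	exec.refresh_self(1);		/* enter self-refresh */
	exec.wait(1000);
	exec.clock_set();		/* reclock while the DRAM sleeps */
	exec.refresh_self(0);		/* exit self-refresh */
	exec.wait(1000);
	exec.mrs(0, mr0);		/* reprogram mode registers */
	exec.timing_set();		/* then the new PFB timings */
	return 0;
}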
-void
-nouveau_mem_timing_fini(struct drm_device *dev)
+int
+nouveau_mem_vbios_type(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+ struct bit_entry M;
+ u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
+ if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) {
+ u8 *table = ROMPTR(dev, M.data[3]);
+ if (table && table[0] == 0x10 && ramcfg < table[3]) {
+ u8 *entry = table + table[1] + (ramcfg * table[2]);
+ switch (entry[0] & 0x0f) {
+ case 0: return NV_MEM_TYPE_DDR2;
+ case 1: return NV_MEM_TYPE_DDR3;
+ case 2: return NV_MEM_TYPE_GDDR3;
+ case 3: return NV_MEM_TYPE_GDDR5;
+ default:
+ break;
+ }
- if(mem->timing) {
- kfree(mem->timing);
- mem->timing = NULL;
+ }
}
+ return NV_MEM_TYPE_UNKNOWN;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
index e5a64f0..07d0d1e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mxm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
@@ -582,6 +582,35 @@ mxm_shadow_dsm(struct drm_device *dev, u8 version)
#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+static u8
+wmi_wmmx_mxmi(struct drm_device *dev, u8 version)
+{
+ u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
+ struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+ if (ACPI_FAILURE(status)) {
+ MXM_DBG(dev, "WMMX MXMI returned %d\n", status);
+ return 0x00;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ version = obj->integer.value;
+ MXM_DBG(dev, "WMMX MXMI version %d.%d\n",
+ (version >> 4), version & 0x0f);
+ } else {
+ version = 0;
+ MXM_DBG(dev, "WMMX MXMI returned non-integer\n");
+ }
+
+ kfree(obj);
+ return version;
+}
+
static bool
mxm_shadow_wmi(struct drm_device *dev, u8 version)
{
@@ -592,7 +621,15 @@ mxm_shadow_wmi(struct drm_device *dev, u8 version)
union acpi_object *obj;
acpi_status status;
- if (!wmi_has_guid(WMI_WMMX_GUID))
+ if (!wmi_has_guid(WMI_WMMX_GUID)) {
+ MXM_DBG(dev, "WMMX GUID not found\n");
+ return false;
+ }
+
+ mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00);
+ if (!mxms_args[1])
+ mxms_args[1] = wmi_wmmx_mxmi(dev, version);
+ if (!mxms_args[1])
return false;
status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 58f4973..69a528d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -27,6 +27,178 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+static u8 *
+nouveau_perf_table(struct drm_device *dev, u8 *ver)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+
+ if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
+ u8 *perf = ROMPTR(dev, P.data[0]);
+ if (perf) {
+ *ver = perf[0];
+ return perf;
+ }
+ }
+
+ if (bios->type == NVBIOS_BMP) {
+ if (bios->data[bios->offset + 6] >= 0x25) {
+ u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
+ if (perf) {
+ *ver = perf[1];
+ return perf;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static u8 *
+nouveau_perf_entry(struct drm_device *dev, int idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u8 *perf = nouveau_perf_table(dev, ver);
+ if (perf) {
+ if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
+ *hdr = perf[3];
+ *cnt = 0;
+ *len = 0;
+ return perf + perf[0] + idx * perf[3];
+ } else
+ if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
+ *hdr = perf[3];
+ *cnt = perf[4];
+ *len = perf[5];
+ return perf + perf[1] + idx * (*hdr + (*cnt * *len));
+ } else
+ if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
+ *hdr = perf[2];
+ *cnt = perf[4];
+ *len = perf[3];
+ return perf + perf[1] + idx * (*hdr + (*cnt * *len));
+ }
+ }
+ return NULL;
+}
+
+static u8 *
+nouveau_perf_rammap(struct drm_device *dev, u32 freq,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct bit_entry P;
+ u8 *perf, i = 0;
+
+ if (!bit_table(dev, 'P', &P) && P.version == 2) {
+ u8 *rammap = ROMPTR(dev, P.data[4]);
+ if (rammap) {
+ u8 *ramcfg = rammap + rammap[1];
+
+ *ver = rammap[0];
+ *hdr = rammap[2];
+ *cnt = rammap[4];
+ *len = rammap[3];
+
+ freq /= 1000;
+ for (i = 0; i < rammap[5]; i++) {
+ if (freq >= ROM16(ramcfg[0]) &&
+ freq <= ROM16(ramcfg[2]))
+ return ramcfg;
+
+ ramcfg += *hdr + (*cnt * *len);
+ }
+ }
+
+ return NULL;
+ }
+
+ if (dev_priv->chipset == 0x49 ||
+ dev_priv->chipset == 0x4b)
+ freq /= 2;
+
+ while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
+ if (*ver >= 0x20 && *ver < 0x25) {
+ if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
+ break;
+ } else
+ if (*ver >= 0x25 && *ver < 0x40) {
+ if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
+ break;
+ }
+ }
+
+ if (perf) {
+ u8 *ramcfg = perf + *hdr;
+ *ver = 0x00;
+ *hdr = 0;
+ return ramcfg;
+ }
+
+ return NULL;
+}
+
+u8 *
+nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ u8 strap, hdr, cnt;
+ u8 *rammap;
+
+ strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
+ if (bios->ram_restrict_tbl_ptr)
+ strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
+
+ rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
+ if (rammap && strap < cnt)
+ return rammap + hdr + (strap * *len);
+
+ return NULL;
+}
+
+u8 *
+nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 *perf, *timing = NULL;
+ u8 i = 0, hdr, cnt;
+
+ if (bios->type == NVBIOS_BMP) {
+ while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
+ len)) && *ver == 0x15) {
+ if (freq <= ROM32(perf[5]) * 20) {
+ *ver = 0x00;
+ *len = 14;
+ return perf + 41;
+ }
+ }
+ return NULL;
+ }
+
+ if (!bit_table(dev, 'P', &P)) {
+ if (P.version == 1)
+ timing = ROMPTR(dev, P.data[4]);
+ else
+ if (P.version == 2)
+ timing = ROMPTR(dev, P.data[8]);
+ }
+
+ if (timing && timing[0] == 0x10) {
+ u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
+ if (ramcfg && ramcfg[1] < timing[2]) {
+ *ver = timing[0];
+ *len = timing[3];
+ return timing + timing[1] + (ramcfg[1] * timing[3]);
+ }
+ }
+
+ return NULL;
+}
+
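
The new parsers are layered: nouveau_perf_timing() locates the timing table from BIT 'P', asks nouveau_perf_ramcfg() for the ramcfg subentry matching the requested memory frequency (which itself goes through nouveau_perf_rammap() and the ram-restrict strap), and finally indexes the timing table with the ramcfg's timing-set byte. The sketch below only makes that last indexing step concrete on a fake 0x10-version table; the table contents are invented.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* fake 0x10 timing table: 4-byte header, 2 entries of 8 bytes */
	uint8_t timing[4 + 2 * 8] = { 0x10, 4, 2, 8 };
	uint8_t ramcfg[2] = { 0x00, 0x01 };	/* byte 1 = timing-set index */
	uint8_t *entry;

	timing[4 + 1 * 8] = 0xaa;		/* first byte of entry 1 */

	/* same arithmetic as the tail of nouveau_perf_timing() */
	entry = timing + timing[1] + (ramcfg[1] * timing[3]);
	printf("selected timing entry starts with 0x%02x (len %d)\n",
	       entry[0], timing[3]);
	return 0;
}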
static void
legacy_perf_init(struct drm_device *dev)
{
@@ -72,74 +244,11 @@ legacy_perf_init(struct drm_device *dev)
pm->nr_perflvl = 1;
}
-static struct nouveau_pm_memtiming *
-nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
- u16 memclk, u8 *entry, u8 recordlen, u8 entries)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nvbios *bios = &dev_priv->vbios;
- u8 ramcfg;
- int i;
-
- /* perf v2 has a separate "timing map" table, we have to match
- * the target memory clock to a specific entry, *then* use
- * ramcfg to select the correct subentry
- */
- if (P->version == 2) {
- u8 *tmap = ROMPTR(dev, P->data[4]);
- if (!tmap) {
- NV_DEBUG(dev, "no timing map pointer\n");
- return NULL;
- }
-
- if (tmap[0] != 0x10) {
- NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]);
- return NULL;
- }
-
- entry = tmap + tmap[1];
- recordlen = tmap[2] + (tmap[4] * tmap[3]);
- for (i = 0; i < tmap[5]; i++, entry += recordlen) {
- if (memclk >= ROM16(entry[0]) &&
- memclk <= ROM16(entry[2]))
- break;
- }
-
- if (i == tmap[5]) {
- NV_WARN(dev, "no match in timing map table\n");
- return NULL;
- }
-
- entry += tmap[2];
- recordlen = tmap[3];
- entries = tmap[4];
- }
-
- ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
- if (bios->ram_restrict_tbl_ptr)
- ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg];
-
- if (ramcfg >= entries) {
- NV_WARN(dev, "ramcfg strap out of bounds!\n");
- return NULL;
- }
-
- entry += ramcfg * recordlen;
- if (entry[1] >= pm->memtimings.nr_timing) {
- if (entry[1] != 0xff)
- NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
- return NULL;
- }
-
- return &pm->memtimings.timing[entry[1]];
-}
-
static void
-nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
- struct nouveau_pm_level *perflvl)
+nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct bit_entry P;
u8 *vmap;
int id;
@@ -158,13 +267,13 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
/* on newer ones, the perflvl stores an index into yet another
* vbios table containing a min/max voltage value for the perflvl
*/
- if (P->version != 2 || P->length < 34) {
+ if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
- P->version, P->length);
+ P.version, P.length);
return;
}
- vmap = ROMPTR(dev, P->data[32]);
+ vmap = ROMPTR(dev, P.data[32]);
if (!vmap) {
NV_DEBUG(dev, "volt map table pointer invalid\n");
return;
@@ -183,130 +292,70 @@ nouveau_perf_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nvbios *bios = &dev_priv->vbios;
- struct bit_entry P;
- struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
- struct nouveau_pm_tbl_header mt_hdr;
- u8 version, headerlen, recordlen, entries;
- u8 *perf, *entry;
- int vid, i;
-
- if (bios->type == NVBIOS_BIT) {
- if (bit_table(dev, 'P', &P))
- return;
-
- if (P.version != 1 && P.version != 2) {
- NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
- return;
- }
-
- perf = ROMPTR(dev, P.data[0]);
- version = perf[0];
- headerlen = perf[1];
- if (version < 0x40) {
- recordlen = perf[3] + (perf[4] * perf[5]);
- entries = perf[2];
-
- pm->pwm_divisor = ROM16(perf[6]);
- } else {
- recordlen = perf[2] + (perf[3] * perf[4]);
- entries = perf[5];
- }
- } else {
- if (bios->data[bios->offset + 6] < 0x25) {
- legacy_perf_init(dev);
- return;
- }
+ u8 *perf, ver, hdr, cnt, len;
+ int ret, vid, i = -1;
- perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
- if (!perf) {
- NV_DEBUG(dev, "perf table pointer invalid\n");
- return;
- }
-
- version = perf[1];
- headerlen = perf[0];
- recordlen = perf[3];
- entries = perf[2];
- }
-
- if (entries > NOUVEAU_PM_MAX_LEVEL) {
- NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
- entries = NOUVEAU_PM_MAX_LEVEL;
+ if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
+ legacy_perf_init(dev);
+ return;
}
- entry = perf + headerlen;
-
- /* For version 0x15, initialize memtiming table */
- if(version == 0x15) {
- memtimings->timing =
- kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
- if (!memtimings->timing) {
- NV_WARN(dev,"Could not allocate memtiming table\n");
- return;
- }
-
- mt_hdr.entry_cnt = entries;
- mt_hdr.entry_len = 14;
- mt_hdr.version = version;
- mt_hdr.header_len = 4;
- }
+ perf = nouveau_perf_table(dev, &ver);
+ if (ver >= 0x20 && ver < 0x40)
+ pm->fan.pwm_divisor = ROM16(perf[6]);
- for (i = 0; i < entries; i++) {
+ while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
- perflvl->timing = NULL;
-
- if (entry[0] == 0xff) {
- entry += recordlen;
+ if (perf[0] == 0xff)
continue;
- }
- switch (version) {
+ switch (ver) {
case 0x12:
case 0x13:
case 0x15:
- perflvl->fanspeed = entry[55];
- if (recordlen > 56)
- perflvl->volt_min = entry[56];
- perflvl->core = ROM32(entry[1]) * 10;
- perflvl->memory = ROM32(entry[5]) * 20;
+ perflvl->fanspeed = perf[55];
+ if (hdr > 56)
+ perflvl->volt_min = perf[56];
+ perflvl->core = ROM32(perf[1]) * 10;
+ perflvl->memory = ROM32(perf[5]) * 20;
break;
case 0x21:
case 0x23:
case 0x24:
- perflvl->fanspeed = entry[4];
- perflvl->volt_min = entry[5];
- perflvl->shader = ROM16(entry[6]) * 1000;
+ perflvl->fanspeed = perf[4];
+ perflvl->volt_min = perf[5];
+ perflvl->shader = ROM16(perf[6]) * 1000;
perflvl->core = perflvl->shader;
- perflvl->core += (signed char)entry[8] * 1000;
+ perflvl->core += (signed char)perf[8] * 1000;
if (dev_priv->chipset == 0x49 ||
dev_priv->chipset == 0x4b)
- perflvl->memory = ROM16(entry[11]) * 1000;
+ perflvl->memory = ROM16(perf[11]) * 1000;
else
- perflvl->memory = ROM16(entry[11]) * 2000;
+ perflvl->memory = ROM16(perf[11]) * 2000;
break;
case 0x25:
- perflvl->fanspeed = entry[4];
- perflvl->volt_min = entry[5];
- perflvl->core = ROM16(entry[6]) * 1000;
- perflvl->shader = ROM16(entry[10]) * 1000;
- perflvl->memory = ROM16(entry[12]) * 1000;
+ perflvl->fanspeed = perf[4];
+ perflvl->volt_min = perf[5];
+ perflvl->core = ROM16(perf[6]) * 1000;
+ perflvl->shader = ROM16(perf[10]) * 1000;
+ perflvl->memory = ROM16(perf[12]) * 1000;
break;
case 0x30:
- perflvl->memscript = ROM16(entry[2]);
+ perflvl->memscript = ROM16(perf[2]);
case 0x35:
- perflvl->fanspeed = entry[6];
- perflvl->volt_min = entry[7];
- perflvl->core = ROM16(entry[8]) * 1000;
- perflvl->shader = ROM16(entry[10]) * 1000;
- perflvl->memory = ROM16(entry[12]) * 1000;
- perflvl->vdec = ROM16(entry[16]) * 1000;
- perflvl->dom6 = ROM16(entry[20]) * 1000;
+ perflvl->fanspeed = perf[6];
+ perflvl->volt_min = perf[7];
+ perflvl->core = ROM16(perf[8]) * 1000;
+ perflvl->shader = ROM16(perf[10]) * 1000;
+ perflvl->memory = ROM16(perf[12]) * 1000;
+ perflvl->vdec = ROM16(perf[16]) * 1000;
+ perflvl->dom6 = ROM16(perf[20]) * 1000;
break;
case 0x40:
-#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
+#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
perflvl->fanspeed = 0; /*XXX*/
- perflvl->volt_min = entry[2];
+ perflvl->volt_min = perf[2];
if (dev_priv->card_type == NV_50) {
perflvl->core = subent(0);
perflvl->shader = subent(1);
@@ -329,36 +378,34 @@ nouveau_perf_init(struct drm_device *dev)
}
/* make sure vid is valid */
- nouveau_perf_voltage(dev, &P, perflvl);
+ nouveau_perf_voltage(dev, perflvl);
if (pm->voltage.supported && perflvl->volt_min) {
vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
if (vid < 0) {
- NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
- entry += recordlen;
+ NV_DEBUG(dev, "perflvl %d, bad vid\n", i);
continue;
}
}
/* get the corresponding memory timings */
- if (version == 0x15) {
- memtimings->timing[i].id = i;
- nv30_mem_timing_entry(dev,&mt_hdr,(struct nouveau_pm_tbl_entry*) &entry[41],0,&memtimings->timing[i]);
- perflvl->timing = &memtimings->timing[i];
- } else if (version > 0x15) {
- /* last 3 args are for < 0x40, ignored for >= 0x40 */
- perflvl->timing =
- nouveau_perf_timing(dev, &P,
- perflvl->memory / 1000,
- entry + perf[3],
- perf[5], perf[4]);
+ ret = nouveau_mem_timing_calc(dev, perflvl->memory,
+ &perflvl->timing);
+ if (ret) {
+ NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret);
+ continue;
}
snprintf(perflvl->name, sizeof(perflvl->name),
"performance_level_%d", i);
perflvl->id = i;
- pm->nr_perflvl++;
- entry += recordlen;
+ snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
+ "%d", perflvl->id);
+ perflvl->profile.func = &nouveau_pm_static_profile_func;
+ list_add_tail(&perflvl->profile.head, &pm->profiles);
+
+ pm->nr_perflvl++;
}
}
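
Note: the reworked nouveau_perf_init() above iterates the vbios performance table via nouveau_perf_entry() and, for version 0x40 tables, pulls per-domain clocks out with the redefined subent() macro: each entry is an hdr-byte header followed by len-byte sub-entries, and the low 12 bits of a sub-entry's first 16-bit word hold a clock in MHz, converted to kHz. The standalone sketch below replays only that offset arithmetic; the rom16() helper, the entry bytes and the resulting clocks are invented for illustration and are not taken from a real vbios.

    #include <stdint.h>
    #include <stdio.h>

    /* little-endian 16-bit read, analogous to the driver's ROM16() */
    static uint16_t rom16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* fabricated v0x40 perf entry: 4-byte header, two 4-byte sub-entries */
        const uint8_t perf[] = {
            0x00, 0x00, 0x00, 0x00,   /* entry header (hdr = 4) */
            0x8d, 0x01, 0x00, 0x00,   /* sub-entry 0: 0x18d -> 397 MHz */
            0x2c, 0x01, 0x00, 0x00,   /* sub-entry 1: 0x12c -> 300 MHz */
        };
        const uint8_t hdr = 4, len = 4;
        int n;

        for (n = 0; n < 2; n++) {
            uint32_t khz = (rom16(&perf[hdr + n * len]) & 0xfff) * 1000;
            printf("subent(%d) = %u kHz\n", n, khz);
        }
        return 0;
    }
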
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9064d7f..34d591b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -50,7 +50,7 @@ nouveau_pwmfan_get(struct drm_device *dev)
ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
if (ret == 0) {
ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
- if (ret == 0) {
+ if (ret == 0 && divs) {
divs = max(divs, duty);
if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
duty = divs - duty;
@@ -77,7 +77,7 @@ nouveau_pwmfan_set(struct drm_device *dev, int percent)
ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
if (ret == 0) {
- divs = pm->pwm_divisor;
+ divs = pm->fan.pwm_divisor;
if (pm->fan.pwm_freq) {
/*XXX: PNVIO clock more than likely... */
divs = 135000 / pm->fan.pwm_freq;
@@ -89,7 +89,10 @@ nouveau_pwmfan_set(struct drm_device *dev, int percent)
if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
duty = divs - duty;
- return pm->pwm_set(dev, gpio.line, divs, duty);
+ ret = pm->pwm_set(dev, gpio.line, divs, duty);
+ if (!ret)
+ pm->fan.percent = percent;
+ return ret;
}
return -ENODEV;
@@ -144,9 +147,13 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
return ret;
state = pm->clocks_pre(dev, perflvl);
- if (IS_ERR(state))
- return PTR_ERR(state);
- pm->clocks_set(dev, state);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ goto error;
+ }
+ ret = pm->clocks_set(dev, state);
+ if (ret)
+ goto error;
ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
if (ret)
@@ -154,6 +161,65 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
pm->cur = perflvl;
return 0;
+
+error:
+ /* restore the fan speed and voltage before leaving */
+ nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+ return ret;
+}
+
+void
+nouveau_pm_trigger(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_profile *profile = NULL;
+ struct nouveau_pm_level *perflvl = NULL;
+ int ret;
+
+ /* select power profile based on current power source */
+ if (power_supply_is_system_supplied())
+ profile = pm->profile_ac;
+ else
+ profile = pm->profile_dc;
+
+ if (profile != pm->profile) {
+ pm->profile->func->fini(pm->profile);
+ pm->profile = profile;
+ pm->profile->func->init(pm->profile);
+ }
+
+ /* select performance level based on profile */
+ perflvl = profile->func->select(profile);
+
+ /* change perflvl, if necessary */
+ if (perflvl != pm->cur) {
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ u64 time0 = ptimer->read(dev);
+
+ NV_INFO(dev, "setting performance level: %d", perflvl->id);
+ ret = nouveau_pm_perflvl_set(dev, perflvl);
+ if (ret)
+ NV_INFO(dev, "> reclocking failed: %d\n\n", ret);
+
+ NV_INFO(dev, "> reclocking took %lluns\n\n",
+ ptimer->read(dev) - time0);
+ }
+}
+
+static struct nouveau_pm_profile *
+profile_find(struct drm_device *dev, const char *string)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_profile *profile;
+
+ list_for_each_entry(profile, &pm->profiles, head) {
+ if (!strncmp(profile->name, string, sizeof(profile->name)))
+ return profile;
+ }
+
+ return NULL;
}
static int
@@ -161,33 +227,54 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- struct nouveau_pm_level *perflvl = NULL;
+ struct nouveau_pm_profile *ac = NULL, *dc = NULL;
+ char string[16], *cur = string, *ptr;
/* safety precaution, for now */
if (nouveau_perflvl_wr != 7777)
return -EPERM;
- if (!strncmp(profile, "boot", 4))
- perflvl = &pm->boot;
- else {
- int pl = simple_strtol(profile, NULL, 10);
- int i;
+ strncpy(string, profile, sizeof(string));
+ if ((ptr = strchr(string, '\n')))
+ *ptr = '\0';
- for (i = 0; i < pm->nr_perflvl; i++) {
- if (pm->perflvl[i].id == pl) {
- perflvl = &pm->perflvl[i];
- break;
- }
- }
+ ptr = strsep(&cur, ",");
+ if (ptr)
+ ac = profile_find(dev, ptr);
- if (!perflvl)
- return -EINVAL;
- }
+ ptr = strsep(&cur, ",");
+ if (ptr)
+ dc = profile_find(dev, ptr);
+ else
+ dc = ac;
+
+ if (ac == NULL || dc == NULL)
+ return -EINVAL;
+
+ pm->profile_ac = ac;
+ pm->profile_dc = dc;
+ nouveau_pm_trigger(dev);
+ return 0;
+}
+
+static void
+nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
+{
+}
- NV_INFO(dev, "setting performance level: %s\n", profile);
- return nouveau_pm_perflvl_set(dev, perflvl);
+static struct nouveau_pm_level *
+nouveau_pm_static_select(struct nouveau_pm_profile *profile)
+{
+ return container_of(profile, struct nouveau_pm_level, profile);
}
+const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
+ .destroy = nouveau_pm_static_dummy,
+ .init = nouveau_pm_static_dummy,
+ .fini = nouveau_pm_static_dummy,
+ .select = nouveau_pm_static_select,
+};
+
static int
nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
@@ -197,9 +284,11 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
memset(perflvl, 0, sizeof(*perflvl));
- ret = pm->clocks_get(dev, perflvl);
- if (ret)
- return ret;
+ if (pm->clocks_get) {
+ ret = pm->clocks_get(dev, perflvl);
+ if (ret)
+ return ret;
+ }
if (pm->voltage.supported && pm->voltage_get) {
ret = pm->voltage_get(dev);
@@ -213,13 +302,14 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
if (ret > 0)
perflvl->fanspeed = ret;
+ nouveau_mem_timing_read(dev, &perflvl->timing);
return 0;
}
static void
nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
{
- char c[16], s[16], v[32], f[16], t[16], m[16];
+ char c[16], s[16], v[32], f[16], m[16];
c[0] = '\0';
if (perflvl->core)
@@ -247,18 +337,15 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
if (perflvl->fanspeed)
snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
- t[0] = '\0';
- if (perflvl->timing)
- snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
-
- snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f);
+ snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
}
static ssize_t
nouveau_pm_get_perflvl_info(struct device *d,
struct device_attribute *a, char *buf)
{
- struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
+ struct nouveau_pm_level *perflvl =
+ container_of(a, struct nouveau_pm_level, dev_attr);
char *ptr = buf;
int len = PAGE_SIZE;
@@ -280,12 +367,8 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
int len = PAGE_SIZE, ret;
char *ptr = buf;
- if (!pm->cur)
- snprintf(ptr, len, "setting: boot\n");
- else if (pm->cur == &pm->boot)
- snprintf(ptr, len, "setting: boot\nc:");
- else
- snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id);
+ snprintf(ptr, len, "profile: %s, %s\nc:",
+ pm->profile_ac->name, pm->profile_dc->name);
ptr += strlen(buf);
len -= strlen(buf);
@@ -397,7 +480,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
long value;
- if (strict_strtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
temp->down_clock = value/1000;
@@ -432,7 +515,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
long value;
- if (strict_strtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value) == -EINVAL)
return count;
temp->critical = value/1000;
@@ -529,7 +612,7 @@ nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
if (nouveau_perflvl_wr != 7777)
return -EPERM;
- if (strict_strtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value) == -EINVAL)
return -EINVAL;
if (value < pm->fan.min_duty)
@@ -568,7 +651,7 @@ nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
long value;
- if (strict_strtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value) == -EINVAL)
return -EINVAL;
if (value < 0)
@@ -609,7 +692,7 @@ nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
long value;
- if (strict_strtol(buf, 10, &value) == -EINVAL)
+ if (kstrtol(buf, 10, &value) == -EINVAL)
return -EINVAL;
if (value < 0)
@@ -731,8 +814,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
if (pm->hwmon) {
sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj,
+ &hwmon_pwm_fan_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj,
+ &hwmon_fan_rpm_attrgroup);
hwmon_device_unregister(pm->hwmon);
}
@@ -752,6 +837,7 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
bool ac = power_supply_is_system_supplied();
NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+ nouveau_pm_trigger(dev);
}
return NOTIFY_OK;
@@ -766,35 +852,48 @@ nouveau_pm_init(struct drm_device *dev)
char info[256];
int ret, i;
- nouveau_mem_timing_init(dev);
+ /* parse aux tables from vbios */
nouveau_volt_init(dev);
- nouveau_perf_init(dev);
nouveau_temp_init(dev);
+ /* determine current ("boot") performance level */
+ ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+ if (ret) {
+ NV_ERROR(dev, "failed to determine boot perflvl\n");
+ return ret;
+ }
+
+ strncpy(pm->boot.name, "boot", 4);
+ strncpy(pm->boot.profile.name, "boot", 4);
+ pm->boot.profile.func = &nouveau_pm_static_profile_func;
+
+ INIT_LIST_HEAD(&pm->profiles);
+ list_add(&pm->boot.profile.head, &pm->profiles);
+
+ pm->profile_ac = &pm->boot.profile;
+ pm->profile_dc = &pm->boot.profile;
+ pm->profile = &pm->boot.profile;
+ pm->cur = &pm->boot;
+
+ /* add performance levels from vbios */
+ nouveau_perf_init(dev);
+
+ /* display available performance levels */
NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
for (i = 0; i < pm->nr_perflvl; i++) {
nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
}
- /* determine current ("boot") performance level */
- ret = nouveau_pm_perflvl_get(dev, &pm->boot);
- if (ret == 0) {
- strncpy(pm->boot.name, "boot", 4);
- pm->cur = &pm->boot;
-
- nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
- NV_INFO(dev, "c:%s", info);
- }
+ nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+ NV_INFO(dev, "c:%s", info);
/* switch performance levels now if requested */
- if (nouveau_perflvl != NULL) {
- ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
- if (ret) {
- NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
- nouveau_perflvl, ret);
- }
- }
+ if (nouveau_perflvl != NULL)
+ nouveau_pm_profile_set(dev, nouveau_perflvl);
+
+ /* determine the current fan speed */
+ pm->fan.percent = nouveau_pwmfan_get(dev);
nouveau_sysfs_init(dev);
nouveau_hwmon_init(dev);
@@ -811,6 +910,12 @@ nouveau_pm_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_profile *profile, *tmp;
+
+ list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
+ list_del(&profile->head);
+ profile->func->destroy(profile);
+ }
if (pm->cur != &pm->boot)
nouveau_pm_perflvl_set(dev, &pm->boot);
@@ -818,7 +923,6 @@ nouveau_pm_fini(struct drm_device *dev)
nouveau_temp_fini(dev);
nouveau_perf_fini(dev);
nouveau_volt_fini(dev);
- nouveau_mem_timing_fini(dev);
#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
unregister_acpi_notifier(&pm->acpi_nb);
@@ -840,4 +944,5 @@ nouveau_pm_resume(struct drm_device *dev)
perflvl = pm->cur;
pm->cur = &pm->boot;
nouveau_pm_perflvl_set(dev, perflvl);
+ nouveau_pwmfan_set(dev, pm->fan.percent);
}
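
Note: the pm_profile interface above now takes either a single profile name or an "ac,dc" pair. nouveau_pm_profile_set() strips a trailing newline, splits on the comma, and falls back to the AC profile for DC when only one name is given; nouveau_pm_trigger() then picks between the two whenever the power source changes. The userland sketch below mirrors just that string handling; the 16-byte buffer matches the driver's, but the example inputs and helper name are assumptions and no kernel interfaces are used.

    #include <stdio.h>
    #include <string.h>

    /* split "ac[,dc]" the way nouveau_pm_profile_set() does: drop a trailing
     * newline, take the part before the comma as the AC profile and the part
     * after it (if any) as the DC profile, defaulting DC to AC. */
    static void parse_profiles(const char *input, char *ac, char *dc, size_t n)
    {
        char buf[16], *comma, *nl;

        strncpy(buf, input, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        if ((nl = strchr(buf, '\n')))
            *nl = '\0';

        comma = strchr(buf, ',');
        if (comma)
            *comma++ = '\0';

        snprintf(ac, n, "%s", buf);
        snprintf(dc, n, "%s", comma ? comma : buf);
    }

    int main(void)
    {
        char ac[16], dc[16];

        parse_profiles("2,boot\n", ac, dc, sizeof(ac));
        printf("AC: %s, DC: %s\n", ac, dc);   /* AC: 2, DC: boot */

        parse_profiles("boot\n", ac, dc, sizeof(ac));
        printf("AC: %s, DC: %s\n", ac, dc);   /* AC: boot, DC: boot */
        return 0;
    }
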
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 2f8e14f..3f82dfe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -25,10 +25,30 @@
#ifndef __NOUVEAU_PM_H__
#define __NOUVEAU_PM_H__
+struct nouveau_mem_exec_func {
+ struct drm_device *dev;
+ void (*precharge)(struct nouveau_mem_exec_func *);
+ void (*refresh)(struct nouveau_mem_exec_func *);
+ void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
+ void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
+ void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
+ u32 (*mrg)(struct nouveau_mem_exec_func *, int mr);
+ void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
+ void (*clock_set)(struct nouveau_mem_exec_func *);
+ void (*timing_set)(struct nouveau_mem_exec_func *);
+ void *priv;
+};
+
+/* nouveau_mem.c */
+int nouveau_mem_exec(struct nouveau_mem_exec_func *,
+ struct nouveau_pm_level *);
+
/* nouveau_pm.c */
int nouveau_pm_init(struct drm_device *dev);
void nouveau_pm_fini(struct drm_device *dev);
void nouveau_pm_resume(struct drm_device *dev);
+extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
+void nouveau_pm_trigger(struct drm_device *dev);
/* nouveau_volt.c */
void nouveau_volt_init(struct drm_device *);
@@ -41,6 +61,8 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
/* nouveau_perf.c */
void nouveau_perf_init(struct drm_device *);
void nouveau_perf_fini(struct drm_device *);
+u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
/* nouveau_mem.c */
void nouveau_mem_timing_init(struct drm_device *);
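
Note: the new struct nouveau_mem_exec_func turns memory reclocking into a set of chipset-supplied primitives (bank precharge, refresh control, mode-register access, clock and timing programming) that nouveau_mem_exec() drives; the nv50_pm.c hunks later in this patch fill the table with hwsq-emitting callbacks. The mock below only illustrates the shape of such a callback table in plain C: every name is hypothetical, and the ordering in run_reclock() is a rough guess at a generic DRAM reclock flow, not the sequence actually implemented by nouveau_mem_exec(), which lives in nouveau_mem.c outside this diff.

    #include <stdio.h>

    struct mem_exec_mock {
        void (*precharge)(struct mem_exec_mock *);
        void (*refresh_auto)(struct mem_exec_mock *, int enable);
        void (*clock_set)(struct mem_exec_mock *);
        void (*timing_set)(struct mem_exec_mock *);
        void *priv;
    };

    static void my_precharge(struct mem_exec_mock *e)  { printf("precharge banks\n"); }
    static void my_refresh_auto(struct mem_exec_mock *e, int on)
    {
        printf("auto refresh %s\n", on ? "on" : "off");
    }
    static void my_clock_set(struct mem_exec_mock *e)  { printf("switch memory PLL\n"); }
    static void my_timing_set(struct mem_exec_mock *e) { printf("write new timing set\n"); }

    /* illustrative ordering only -- the real sequencing is nouveau_mem_exec()'s job */
    static void run_reclock(struct mem_exec_mock *e)
    {
        e->precharge(e);
        e->refresh_auto(e, 0);
        e->clock_set(e);
        e->timing_set(e);
        e->refresh_auto(e, 1);
    }

    int main(void)
    {
        struct mem_exec_mock exec = {
            .precharge    = my_precharge,
            .refresh_auto = my_refresh_auto,
            .clock_set    = my_clock_set,
            .timing_set   = my_timing_set,
            .priv         = NULL,
        };
        run_reclock(&exec);
        return 0;
    }
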
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index f80c5e0..9c144fb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -87,7 +87,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_get = nv04_pm_clocks_get;
engine->pm.clocks_pre = nv04_pm_clocks_pre;
engine->pm.clocks_set = nv04_pm_clocks_set;
- engine->vram.init = nouveau_mem_detect;
+ engine->vram.init = nv04_fb_vram_init;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
@@ -134,7 +134,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_get = nv04_pm_clocks_get;
engine->pm.clocks_pre = nv04_pm_clocks_pre;
engine->pm.clocks_set = nv04_pm_clocks_set;
- engine->vram.init = nouveau_mem_detect;
+ if (dev_priv->chipset == 0x1a ||
+ dev_priv->chipset == 0x1f)
+ engine->vram.init = nv1a_fb_vram_init;
+ else
+ engine->vram.init = nv10_fb_vram_init;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
@@ -153,11 +157,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nv10_fb_init;
- engine->fb.takedown = nv10_fb_takedown;
- engine->fb.init_tile_region = nv10_fb_init_tile_region;
- engine->fb.set_tile_region = nv10_fb_set_tile_region;
- engine->fb.free_tile_region = nv10_fb_free_tile_region;
+ engine->fb.init = nv20_fb_init;
+ engine->fb.takedown = nv20_fb_takedown;
+ engine->fb.init_tile_region = nv20_fb_init_tile_region;
+ engine->fb.set_tile_region = nv20_fb_set_tile_region;
+ engine->fb.free_tile_region = nv20_fb_free_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nv04_fifo_fini;
@@ -181,7 +185,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_get = nv04_pm_clocks_get;
engine->pm.clocks_pre = nv04_pm_clocks_pre;
engine->pm.clocks_set = nv04_pm_clocks_set;
- engine->vram.init = nouveau_mem_detect;
+ engine->vram.init = nv20_fb_vram_init;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
@@ -230,7 +234,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clocks_set = nv04_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
- engine->vram.init = nouveau_mem_detect;
+ engine->vram.init = nv20_fb_vram_init;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
@@ -286,7 +290,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv40_temp_get;
engine->pm.pwm_get = nv40_pm_pwm_get;
engine->pm.pwm_set = nv40_pm_pwm_set;
- engine->vram.init = nouveau_mem_detect;
+ engine->vram.init = nv40_fb_vram_init;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
@@ -588,47 +592,45 @@ nouveau_card_init(struct drm_device *dev)
nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
}
- nouveau_pm_init(dev);
-
- ret = engine->vram.init(dev);
+ /* PMC */
+ ret = engine->mc.init(dev);
if (ret)
goto out_bios;
- ret = nouveau_gpuobj_init(dev);
+ /* PTIMER */
+ ret = engine->timer.init(dev);
if (ret)
- goto out_vram;
+ goto out_mc;
- ret = engine->instmem.init(dev);
+ /* PFB */
+ ret = engine->fb.init(dev);
if (ret)
- goto out_gpuobj;
+ goto out_timer;
- ret = nouveau_mem_vram_init(dev);
+ ret = engine->vram.init(dev);
if (ret)
- goto out_instmem;
+ goto out_fb;
- ret = nouveau_mem_gart_init(dev);
+ /* PGPIO */
+ ret = nouveau_gpio_create(dev);
if (ret)
- goto out_ttmvram;
+ goto out_vram;
- /* PMC */
- ret = engine->mc.init(dev);
+ ret = nouveau_gpuobj_init(dev);
if (ret)
- goto out_gart;
+ goto out_gpio;
- /* PGPIO */
- ret = nouveau_gpio_create(dev);
+ ret = engine->instmem.init(dev);
if (ret)
- goto out_mc;
+ goto out_gpuobj;
- /* PTIMER */
- ret = engine->timer.init(dev);
+ ret = nouveau_mem_vram_init(dev);
if (ret)
- goto out_gpio;
+ goto out_instmem;
- /* PFB */
- ret = engine->fb.init(dev);
+ ret = nouveau_mem_gart_init(dev);
if (ret)
- goto out_timer;
+ goto out_ttmvram;
if (!dev_priv->noaccel) {
switch (dev_priv->card_type) {
@@ -734,11 +736,12 @@ nouveau_card_init(struct drm_device *dev)
goto out_irq;
nouveau_backlight_init(dev);
+ nouveau_pm_init(dev);
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_fence_init(dev);
if (ret)
- goto out_disp;
+ goto out_pm;
ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
NvDmaFB, NvDmaTT);
@@ -762,7 +765,8 @@ out_chan:
nouveau_channel_put_unlocked(&dev_priv->channel);
out_fence:
nouveau_fence_fini(dev);
-out_disp:
+out_pm:
+ nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
out_irq:
@@ -779,15 +783,6 @@ out_engine:
dev_priv->eng[e]->destroy(dev,e );
}
}
-
- engine->fb.takedown(dev);
-out_timer:
- engine->timer.takedown(dev);
-out_gpio:
- nouveau_gpio_destroy(dev);
-out_mc:
- engine->mc.takedown(dev);
-out_gart:
nouveau_mem_gart_fini(dev);
out_ttmvram:
nouveau_mem_vram_fini(dev);
@@ -795,10 +790,17 @@ out_instmem:
engine->instmem.takedown(dev);
out_gpuobj:
nouveau_gpuobj_takedown(dev);
+out_gpio:
+ nouveau_gpio_destroy(dev);
out_vram:
engine->vram.takedown(dev);
+out_fb:
+ engine->fb.takedown(dev);
+out_timer:
+ engine->timer.takedown(dev);
+out_mc:
+ engine->mc.takedown(dev);
out_bios:
- nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
out_display_early:
engine->display.late_takedown(dev);
@@ -823,6 +825,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_fence_fini(dev);
}
+ nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
@@ -835,11 +838,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
}
}
- engine->fb.takedown(dev);
- engine->timer.takedown(dev);
- nouveau_gpio_destroy(dev);
- engine->mc.takedown(dev);
- engine->display.late_takedown(dev);
if (dev_priv->vga_ram) {
nouveau_bo_unpin(dev_priv->vga_ram);
@@ -855,12 +853,17 @@ static void nouveau_card_takedown(struct drm_device *dev)
engine->instmem.takedown(dev);
nouveau_gpuobj_takedown(dev);
- engine->vram.takedown(dev);
- nouveau_irq_fini(dev);
+ nouveau_gpio_destroy(dev);
+ engine->vram.takedown(dev);
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->mc.takedown(dev);
- nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
+ engine->display.late_takedown(dev);
+
+ nouveau_irq_fini(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -990,7 +993,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
- uint32_t reg0, strap;
+ uint32_t reg0 = ~0, strap;
resource_size_t mmio_start_offs;
int ret;
@@ -1002,15 +1005,72 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = dev_priv;
dev_priv->dev = dev;
+ pci_set_master(dev->pdev);
+
dev_priv->flags = flags & NOUVEAU_FLAGS;
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- /* resource 0 is mmio regs */
- /* resource 1 is linear FB */
- /* resource 2 is RAMIN (mmio regs + 0x1000000) */
- /* resource 6 is bios */
+ /* first up, map the start of mmio and determine the chipset */
+ dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE);
+ if (dev_priv->mmio) {
+#ifdef __BIG_ENDIAN
+ /* put the card into big-endian mode if it's not */
+ if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+ nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
+ DRM_MEMORYBARRIER();
+#endif
+
+ /* determine chipset and derive architecture from it */
+ reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
+ if ((reg0 & 0x0f000000) > 0) {
+ dev_priv->chipset = (reg0 & 0xff00000) >> 20;
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x10:
+ case 0x20:
+ case 0x30:
+ dev_priv->card_type = dev_priv->chipset & 0xf0;
+ break;
+ case 0x40:
+ case 0x60:
+ dev_priv->card_type = NV_40;
+ break;
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ dev_priv->card_type = NV_50;
+ break;
+ case 0xc0:
+ dev_priv->card_type = NV_C0;
+ break;
+ case 0xd0:
+ dev_priv->card_type = NV_D0;
+ break;
+ default:
+ break;
+ }
+ } else
+ if ((reg0 & 0xff00fff0) == 0x20004000) {
+ if (reg0 & 0x00f00000)
+ dev_priv->chipset = 0x05;
+ else
+ dev_priv->chipset = 0x04;
+ dev_priv->card_type = NV_04;
+ }
+
+ iounmap(dev_priv->mmio);
+ }
+
+ if (!dev_priv->card_type) {
+ NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0);
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ dev_priv->card_type, reg0);
/* map the mmio regs */
mmio_start_offs = pci_resource_start(dev->pdev, 0);
@@ -1024,62 +1084,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
-#ifdef __BIG_ENDIAN
- /* Put the card in BE mode if it's not */
- if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
- nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
-
- DRM_MEMORYBARRIER();
-#endif
-
- /* Time to determine the card architecture */
- reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
-
- /* We're dealing with >=NV10 */
- if ((reg0 & 0x0f000000) > 0) {
- /* Bit 27-20 contain the architecture in hex */
- dev_priv->chipset = (reg0 & 0xff00000) >> 20;
- /* NV04 or NV05 */
- } else if ((reg0 & 0xff00fff0) == 0x20004000) {
- if (reg0 & 0x00f00000)
- dev_priv->chipset = 0x05;
- else
- dev_priv->chipset = 0x04;
- } else
- dev_priv->chipset = 0xff;
-
- switch (dev_priv->chipset & 0xf0) {
- case 0x00:
- case 0x10:
- case 0x20:
- case 0x30:
- dev_priv->card_type = dev_priv->chipset & 0xf0;
- break;
- case 0x40:
- case 0x60:
- dev_priv->card_type = NV_40;
- break;
- case 0x50:
- case 0x80:
- case 0x90:
- case 0xa0:
- dev_priv->card_type = NV_50;
- break;
- case 0xc0:
- dev_priv->card_type = NV_C0;
- break;
- case 0xd0:
- dev_priv->card_type = NV_D0;
- break;
- default:
- NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
- ret = -EINVAL;
- goto err_mmio;
- }
-
- NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
- dev_priv->card_type, reg0);
-
/* determine frequency of timing crystal */
strap = nv_rd32(dev, 0x101000);
if ( dev_priv->chipset < 0x17 ||
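
Note: nouveau_load() now maps a single page of MMIO up front, reads NV03_PMC_BOOT_0 and derives the chipset and card_type before the full register window is mapped, so the rest of bring-up can key off the generation immediately. The snippet below repeats that decode on a fabricated register value purely to show the bit slicing; the NV_40/NV_50/NV_C0 enumerators are represented by plain numbers here. With this input it prints "chipset 0x92, card type 0x50", an NV50-family part.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t reg0 = 0x092000a2;   /* hypothetical PMC_BOOT_0 readout */
        unsigned int chipset = 0, card_type = 0;

        if ((reg0 & 0x0f000000) > 0) {
            chipset = (reg0 & 0x0ff00000) >> 20;
            switch (chipset & 0xf0) {
            case 0x10: case 0x20: case 0x30:
                card_type = chipset & 0xf0;
                break;
            case 0x40: case 0x60:
                card_type = 0x40;    /* NV_40 */
                break;
            case 0x50: case 0x80: case 0x90: case 0xa0:
                card_type = 0x50;    /* NV_50 */
                break;
            case 0xc0:
                card_type = 0xc0;    /* NV_C0 */
                break;
            }
        }

        printf("chipset 0x%02x, card type 0x%02x\n", chipset, card_type);
        return 0;
    }
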
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
index 638cf60..d5eedd6 100644
--- a/drivers/gpu/drm/nouveau/nv04_fb.c
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -4,6 +4,40 @@
#include "nouveau_drm.h"
int
+nv04_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
+
+ if (boot0 & 0x00000100) {
+ dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ dev_priv->vram_size *= 1024 * 1024;
+ } else {
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ dev_priv->vram_size = 32 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ dev_priv->vram_size = 16 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ dev_priv->vram_size = 8 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ dev_priv->vram_size = 4 * 1024 * 1024;
+ break;
+ }
+ }
+
+ if ((boot0 & 0x00000038) <= 0x10)
+ dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
+ else
+ dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
+
+ return 0;
+}
+
+int
nv04_fb_init(struct drm_device *dev)
{
/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
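
Note: nv04_fb_vram_init() above replaces the old nouveau_mem_detect() path for NV04/NV05: when bit 8 of PFB_BOOT_0 is set, the size comes straight from bits 15:12 (in 2 MiB steps plus 2 MiB); otherwise the RAM_AMOUNT field selects one of four fixed sizes, and the small field in bits 5:3 picks SGRAM versus SDRAM. The sketch below runs the same arithmetic on a made-up register value and prints "8 MiB of SGRAM".

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t boot0 = 0x00003100;  /* hypothetical PFB_BOOT_0 readout */
        uint64_t vram;

        if (boot0 & 0x00000100)
            vram = (uint64_t)(((boot0 >> 12) & 0xf) * 2 + 2) << 20;
        else
            vram = 16ull << 20;       /* would come from the RAM_AMOUNT table */

        printf("%llu MiB of %s\n", (unsigned long long)(vram >> 20),
               (boot0 & 0x00000038) <= 0x10 ? "SGRAM" : "SDRAM");
        return 0;
    }
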
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index f78181a..420b1608 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,81 +3,16 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
-static struct drm_mm_node *
-nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct drm_mm_node *mem;
- int ret;
-
- ret = drm_mm_pre_get(&pfb->tag_heap);
- if (ret)
- return NULL;
-
- spin_lock(&dev_priv->tile.lock);
- mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
- if (mem)
- mem = drm_mm_get_block_atomic(mem, size, 0);
- spin_unlock(&dev_priv->tile.lock);
-
- return mem;
-}
-
-static void
-nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- spin_lock(&dev_priv->tile.lock);
- drm_mm_put_block(mem);
- spin_unlock(&dev_priv->tile.lock);
-}
-
void
nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
- tile->addr = addr;
+ tile->addr = 0x80000000 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
-
- if (dev_priv->card_type == NV_20) {
- if (flags & NOUVEAU_GEM_TILE_ZETA) {
- /*
- * Allocate some of the on-die tag memory,
- * used to store Z compression meta-data (most
- * likely just a bitmap determining if a given
- * tile is compressed or not).
- */
- tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
-
- if (tile->tag_mem) {
- /* Enable Z compression */
- if (dev_priv->chipset >= 0x25)
- tile->zcomp = tile->tag_mem->start |
- (bpp == 16 ?
- NV25_PFB_ZCOMP_MODE_16 :
- NV25_PFB_ZCOMP_MODE_32);
- else
- tile->zcomp = tile->tag_mem->start |
- NV20_PFB_ZCOMP_EN |
- (bpp == 16 ? 0 :
- NV20_PFB_ZCOMP_MODE_32);
- }
-
- tile->addr |= 3;
- } else {
- tile->addr |= 1;
- }
-
- } else {
- tile->addr |= 1 << 31;
- }
}
void
@@ -86,11 +21,6 @@ nv10_fb_free_tile_region(struct drm_device *dev, int i)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- if (tile->tag_mem) {
- nv20_fb_free_tag(dev, tile->tag_mem);
- tile->tag_mem = NULL;
- }
-
tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
}
@@ -103,9 +33,48 @@ nv10_fb_set_tile_region(struct drm_device *dev, int i)
nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+}
+
+int
+nv1a_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem, mib;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ NV_ERROR(dev, "no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->chipset == 0x1a) {
+ pci_read_config_dword(bridge, 0x7c, &mem);
+ mib = ((mem >> 6) & 31) + 1;
+ } else {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ mib = ((mem >> 4) & 127) + 1;
+ }
+
+ dev_priv->vram_size = mib * 1024 * 1024;
+ return 0;
+}
+
+int
+nv10_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ u32 cfg0 = nv_rd32(dev, 0x100200);
- if (dev_priv->card_type == NV_20)
- nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
+ dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+
+ if (cfg0 & 0x00000001)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR1;
+ else
+ dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
+
+ return 0;
}
int
@@ -115,14 +84,8 @@ nv10_fb_init(struct drm_device *dev)
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i;
- pfb->num_tiles = NV10_PFB_TILE__SIZE;
-
- if (dev_priv->card_type == NV_20)
- drm_mm_init(&pfb->tag_heap, 0,
- (dev_priv->chipset >= 0x25 ?
- 64 * 1024 : 32 * 1024));
-
/* Turn all the tiling regions off. */
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
for (i = 0; i < pfb->num_tiles; i++)
pfb->set_tile_region(dev, i);
@@ -138,7 +101,4 @@ nv10_fb_takedown(struct drm_device *dev)
for (i = 0; i < pfb->num_tiles; i++)
pfb->free_tile_region(dev, i);
-
- if (dev_priv->card_type == NV_20)
- drm_mm_takedown(&pfb->tag_heap);
}
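
Note: nv1a_fb_vram_init() covers the integrated nForce parts (chipsets 0x1a and 0x1f), whose framebuffer is carved out of system memory: the size is read from the host bridge's PCI configuration space at offset 0x7c or 0x84 depending on the chipset, while discrete NV10-class boards read it from PFB_FIFO_DATA. The sketch below shows only the config-space arithmetic; both config dwords are invented values, and with them it reports 32 MiB.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int chipset = 0x1a;
        uint32_t cfg7c = 0x000007c0;  /* invented bridge config dword at 0x7c */
        uint32_t cfg84 = 0x000001f0;  /* invented bridge config dword at 0x84 */
        uint32_t mib;

        if (chipset == 0x1a)
            mib = ((cfg7c >> 6) & 31) + 1;
        else
            mib = ((cfg84 >> 4) & 127) + 1;

        printf("bridge reports %u MiB for the IGP framebuffer\n", mib);
        return 0;
    }
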
diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c
new file mode 100644
index 0000000..19bd640
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_fb.c
@@ -0,0 +1,148 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct drm_mm_node *mem;
+ int ret;
+
+ ret = drm_mm_pre_get(&pfb->tag_heap);
+ if (ret)
+ return NULL;
+
+ spin_lock(&dev_priv->tile.lock);
+ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block_atomic(mem, size, 0);
+ spin_unlock(&dev_priv->tile.lock);
+
+ return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *mem = *pmem;
+ if (mem) {
+ spin_lock(&dev_priv->tile.lock);
+ drm_mm_put_block(mem);
+ spin_unlock(&dev_priv->tile.lock);
+ *pmem = NULL;
+ }
+}
+
+void
+nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+ tile->addr = 0x00000001 | addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+
+ /* Allocate some of the on-die tag memory, used to store Z
+ * compression meta-data (most likely just a bitmap determining
+ * if a given tile is compressed or not).
+ */
+ if (flags & NOUVEAU_GEM_TILE_ZETA) {
+ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+ if (tile->tag_mem) {
+ /* Enable Z compression */
+ tile->zcomp = tile->tag_mem->start;
+ if (dev_priv->chipset >= 0x25) {
+ if (bpp == 16)
+ tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
+ else
+ tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
+ } else {
+ tile->zcomp |= NV20_PFB_ZCOMP_EN;
+ if (bpp != 16)
+ tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
+ }
+ }
+
+ tile->addr |= 2;
+ }
+}
+
+void
+nv20_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+ nv20_fb_free_tag(dev, &tile->tag_mem);
+}
+
+void
+nv20_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
+}
+
+int
+nv20_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mem_size = nv_rd32(dev, 0x10020c);
+ u32 pbus1218 = nv_rd32(dev, 0x001218);
+
+ dev_priv->vram_size = mem_size & 0xff000000;
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
+ }
+
+ return 0;
+}
+
+int
+nv20_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ if (dev_priv->chipset >= 0x25)
+ drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
+ else
+ drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
+
+ /* Turn all the tiling regions off. */
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_tile_region(dev, i);
+
+ return 0;
+}
+
+void
+nv20_fb_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->free_tile_region(dev, i);
+
+ drm_mm_takedown(&pfb->tag_heap);
+}
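
Note: the Z-compression tag handling moved into the new nv20_fb.c. A ZETA tile region asks nv20_fb_alloc_tag() for size/256 units of the on-die tag heap (initialised to 32 KiB, or 64 KiB on nv25 and later), and the allocation offset is then ORed with a chipset- and depth-dependent mode into the ZCOMP register. The sketch below only checks the tag budget for an example tile; the sizes are arbitrary, and whether the heap units correspond exactly to bytes is not spelled out in this hunk, so treat it as a rough budget check.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tile_size = 8u << 20;          /* 8 MiB depth buffer (example) */
        uint32_t tag_units = tile_size / 256;   /* what nv20_fb_alloc_tag() is asked for */
        uint32_t tag_heap  = 64 * 1024;         /* nv25+ heap; 32 KiB before that */

        printf("tile wants %u tag units, heap has %u: %s\n",
               tag_units, tag_heap,
               tag_units <= tag_heap ? "compression possible" : "uncompressed");
        return 0;
    }
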
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f0ac2a7..7fbcb33 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -72,6 +72,51 @@ nv44_fb_init_gart(struct drm_device *dev)
}
int
+nv40_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* 0x001218 is actually present on a few other NV4X I looked at,
+ * and even contains sane values matching 0x100474. From looking
+ * at various vbios images however, this isn't the case everywhere.
+ * So, I chose to use the same regs I've seen NVIDIA reading around
+ * the memory detection, hopefully that'll get us the right numbers
+ */
+ if (dev_priv->chipset == 0x40) {
+ u32 pbus1218 = nv_rd32(dev, 0x001218);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
+ }
+ } else
+ if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ u32 pfb914 = nv_rd32(dev, 0x100914);
+ switch (pfb914 & 0x00000003) {
+ case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000003: break;
+ }
+ } else
+ if (dev_priv->chipset != 0x4e) {
+ u32 pfb474 = nv_rd32(dev, 0x100474);
+ if (pfb474 & 0x00000004)
+ dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR1;
+ } else {
+ dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
+ }
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+int
nv40_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
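
Note: nv40_fb_vram_init() selects the memory-type register per chipset (0x001218 on NV40 proper, 0x100914 on 0x49/0x4b, 0x100474 elsewhere, with 0x4e treated as stolen memory) and takes the size from the top byte of register 0x10020c, i.e. with 16 MiB granularity. A minimal sketch of the size part, using a made-up readout that comes out to 512 MiB:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t r10020c = 0x20000000;  /* hypothetical readout of 0x10020c */
        uint64_t vram_size = r10020c & 0xff000000;

        printf("%llu MiB of VRAM\n", (unsigned long long)(vram_size >> 20));
        return 0;
    }
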
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 8f6c2ac..701b927 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -170,6 +170,41 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
return ret;
}
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
+ int ret;
+ int adj;
+ u32 hue, vib;
+
+ NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n",
+ nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
+
+ ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ if (ret) {
+ NV_ERROR(dev, "no space while setting color vibrance\n");
+ return ret;
+ }
+
+ adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+ vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+
+ hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ OUT_RING (evo, (hue << 20) | (vib << 8));
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
+ FIRE_RING (evo);
+ }
+
+ return 0;
+}
+
struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
{
@@ -577,8 +612,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
OUT_RING (evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
OUT_RING (evo, (y << 16) | x);
@@ -661,6 +694,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
nv_crtc->set_dither(nv_crtc, false);
nv_crtc->set_scale(nv_crtc, false);
+ nv_crtc->set_color_vibrance(nv_crtc, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
@@ -721,6 +755,9 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
+ nv_crtc->color_vibrance = 50;
+ nv_crtc->vibrant_hue = 0;
+
/* Default CLUT parameters, will be activated on the hw upon
* first mode set.
*/
@@ -751,6 +788,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
/* set function pointers */
nv_crtc->set_dither = nv50_crtc_set_dither;
nv_crtc->set_scale = nv50_crtc_set_scale;
+ nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
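
Note: the new nv50_crtc_set_color_vibrance() rescales the two percentage properties into the 12-bit HUE and VIBRANCE fields of the EVO COLOR_CTRL method. Checking the arithmetic with the defaults set in nv50_crtc_create() (vibrance 50, hue 0) gives 0x00040000, the same value the removed hardcoded NV50_EVO_CRTC_COLOR_CTRL_COLOR write used to program, so the default output should be unchanged. A standalone verification of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int color_vibrance = 50, vibrant_hue = 0;   /* driver defaults */
        int adj = (color_vibrance > 0) ? 50 : 0;
        uint32_t vib = ((color_vibrance * 2047 + adj) / 100) & 0xfff;
        uint32_t hue = ((vibrant_hue * 2047) / 100) & 0xfff;

        printf("COLOR_CTRL = 0x%08x\n", (hue << 20) | (vib << 8));  /* 0x00040000 */
        return 0;
    }
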
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index a0f2beb..55c5633 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -190,11 +190,8 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *connector->native_mode;
- adjusted_mode->base.id = id;
- }
+ connector->native_mode)
+ drm_mode_copy(adjusted_mode, connector->native_mode);
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7ba28e0..0e47a89 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -50,6 +50,29 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
+u32
+nv50_display_active_crtcs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mask = 0;
+ int i;
+
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0) {
+ for (i = 0; i < 2; i++)
+ mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
+ } else {
+ for (i = 0; i < 4; i++)
+ mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+ }
+
+ for (i = 0; i < 3; i++)
+ mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+
+ return mask & 3;
+}
+
static int
evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
{
@@ -840,9 +863,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
if (type == OUTPUT_DP) {
int link = !(dcb->dpconf.sor.link & 1);
if ((mc & 0x000f0000) == 0x00020000)
- nouveau_dp_tu_update(dev, or, link, pclk, 18);
+ nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
else
- nouveau_dp_tu_update(dev, or, link, pclk, 24);
+ nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
}
if (dcb->type != OUTPUT_ANALOG) {
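
Note: nv50_display_active_crtcs() ORs together the SOR and DAC mode-control words and keeps the low two bits; those low bits appear to indicate which head an output is bound to, so the result is a bitmask of CRTCs that currently drive at least one output, replacing the old per-head CLOCK register probe in calc_mclk(). The sketch below mimics only the mask accumulation; the register values are invented.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* invented mode-control words; only the low two bits matter here */
        uint32_t sor_ctrl[2] = { 0x00000201, 0x00000000 };
        uint32_t dac_ctrl[3] = { 0x00000000, 0x00000102, 0x00000000 };
        uint32_t mask = 0;
        int i;

        for (i = 0; i < 2; i++)
            mask |= sor_ctrl[i];
        for (i = 0; i < 3; i++)
            mask |= dac_ctrl[i];

        printf("active crtc mask: 0x%x\n", mask & 3);  /* 0x3 -> both heads */
        return 0;
    }
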
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 95874f7..5d3dd14 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -74,6 +74,8 @@ void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+u32 nv50_display_active_crtcs(struct drm_device *);
+
int nv50_display_sync(struct drm_device *);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *chan);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index 3860ca6..771d879 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -104,7 +104,8 @@
#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000
+#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
+#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
#define NV50_EVO_CRTC_FB_POS 0x000008c0
#define NV50_EVO_CRTC_REAL_RES 0x000008c8
#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index ec5481d..d020ed4 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -28,6 +28,7 @@
#include "nouveau_hw.h"
#include "nouveau_pm.h"
#include "nouveau_hwsq.h"
+#include "nv50_display.h"
enum clk_src {
clk_src_crystal,
@@ -352,17 +353,13 @@ nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
struct nv50_pm_state {
+ struct nouveau_pm_level *perflvl;
+ struct hwsq_ucode eclk_hwsq;
struct hwsq_ucode mclk_hwsq;
u32 mscript;
-
- u32 emast;
- u32 nctrl;
- u32 ncoef;
- u32 sctrl;
- u32 scoef;
-
- u32 amast;
- u32 pdivs;
+ u32 mmast;
+ u32 mctrl;
+ u32 mcoef;
};
static u32
@@ -415,40 +412,153 @@ clk_same(u32 a, u32 b)
return ((a / 1000) == (b / 1000));
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+ hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+ hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+ hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+ if (nsec > 1000)
+ hwsq_usec(hwsq, (nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ if (mr <= 1)
+ return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+ if (mr <= 3)
+ return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+ return 0;
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+ if (mr <= 1) {
+ if (dev_priv->vram_rank_B)
+ hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
+ hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ if (dev_priv->vram_rank_B)
+ hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
+ hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nv50_pm_state *info = exec->priv;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+ u32 ctrl = nv_rd32(exec->dev, 0x004008);
+
+ info->mmast = nv_rd32(exec->dev, 0x00c040);
+ info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
+ info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+ hwsq_wr32(hwsq, 0xc040, info->mmast);
+ hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
+ if (info->mctrl & 0x80000000)
+ hwsq_wr32(hwsq, 0x400c, info->mcoef);
+ hwsq_wr32(hwsq, 0x4008, info->mctrl);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nv50_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ u32 reg = 0x100220 + (i * 4);
+ u32 val = nv_rd32(dev, reg);
+ if (val != perflvl->timing.reg[i])
+ hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
+ }
+}
+
static int
-calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
+calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ struct nv50_pm_state *info)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 crtc_mask = nv50_display_active_crtcs(dev);
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+ struct hwsq_ucode *hwsq = &info->mclk_hwsq;
struct pll_lims pll;
- u32 mast = nv_rd32(dev, 0x00c040);
- u32 ctrl = nv_rd32(dev, 0x004008);
- u32 coef = nv_rd32(dev, 0x00400c);
- u32 orig = ctrl;
- u32 crtc_mask = 0;
int N, M, P;
- int ret, i;
+ int ret;
/* use pcie refclock if possible, otherwise use mpll */
- ctrl &= ~0x81ff0200;
- if (clk_same(freq, read_clk(dev, clk_src_href))) {
- ctrl |= 0x00000200 | (pll.log2p_bias << 19);
+ info->mctrl = nv_rd32(dev, 0x004008);
+ info->mctrl &= ~0x81ff0200;
+ if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
+ info->mctrl |= 0x00000200 | (pll.log2p_bias << 19);
} else {
- ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
+ ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
if (ret == 0)
return -EINVAL;
- ctrl |= 0x80000000 | (P << 22) | (P << 16);
- ctrl |= pll.log2p_bias << 19;
- coef = (N << 8) | M;
- }
-
- mast &= ~0xc0000000; /* get MCLK_2 from HREF */
- mast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
-
- /* determine active crtcs */
- for (i = 0; i < 2; i++) {
- if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
- crtc_mask |= (1 << i);
+ info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
+ info->mctrl |= pll.log2p_bias << 19;
+ info->mcoef = (N << 8) | M;
}
/* build the ucode which will reclock the memory for us */
@@ -462,25 +572,10 @@ calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
- /* prepare memory controller */
- hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
- hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
- hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
- hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
-
- /* reclock memory */
- hwsq_wr32(hwsq, 0xc040, mast);
- hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
- hwsq_wr32(hwsq, 0x400c, coef);
- hwsq_wr32(hwsq, 0x4008, ctrl);
-
- /* restart memory controller */
- hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
- hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
- hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
- hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
-
- hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
+ ret = nouveau_mem_exec(&exec, perflvl);
+ if (ret)
+ return ret;
+
hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
if (dev_priv->chipset >= 0x92)
@@ -494,10 +589,11 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_pm_state *info;
+ struct hwsq_ucode *hwsq;
struct pll_lims pll;
+ u32 out, mast, divs, ctrl;
int clk, ret = -EINVAL;
int N, M, P1, P2;
- u32 out;
if (dev_priv->chipset == 0xaa ||
dev_priv->chipset == 0xac)
@@ -506,54 +602,44 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
+ info->perflvl = perflvl;
- /* core: for the moment at least, always use nvpll */
- clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
- if (clk == 0)
- goto error;
+ /* memory: build hwsq ucode which we'll use to reclock memory.
+ * use pcie refclock if possible, otherwise use mpll */
+ info->mclk_hwsq.len = 0;
+ if (perflvl->memory) {
+ ret = calc_mclk(dev, perflvl, info);
+ if (ret)
+ goto error;
+ info->mscript = perflvl->memscript;
+ }
- info->emast = 0x00000003;
- info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
- info->ncoef = (N << 8) | M;
+ divs = read_div(dev);
+ mast = info->mmast;
- /* shader: tie to nvclk if possible, otherwise use spll. have to be
- * very careful that the shader clock is at least twice the core, or
- * some chipsets will be very unhappy. i expect most or all of these
- * cases will be handled by tying to nvclk, but it's possible there's
- * corners
- */
- if (P1-- && perflvl->shader == (perflvl->core << 1)) {
- info->emast |= 0x00000020;
- info->sctrl = 0x00000000 | (P1 << 19) | (P1 << 16);
- info->scoef = nv_rd32(dev, 0x004024);
- } else {
- clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
- if (clk == 0)
- goto error;
+ /* start building HWSQ script for engine reclocking */
+ hwsq = &info->eclk_hwsq;
+ hwsq_init(hwsq);
+ hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+ hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
- info->emast |= 0x00000030;
- info->sctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
- info->scoef = (N << 8) | M;
+ /* vdec/dom6: switch to "safe" clocks temporarily */
+ if (perflvl->vdec) {
+ mast &= ~0x00000c00;
+ divs &= ~0x00000700;
}
- /* memory: build hwsq ucode which we'll use to reclock memory */
- info->mclk_hwsq.len = 0;
- if (perflvl->memory) {
- clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
- if (clk < 0) {
- ret = clk;
- goto error;
- }
-
- info->mscript = perflvl->memscript;
+ if (perflvl->dom6) {
+ mast &= ~0x0c000000;
+ divs &= ~0x00000007;
}
+ hwsq_wr32(hwsq, 0x00c040, mast);
+
/* vdec: avoid modifying xpll until we know exactly how the other
* clock domains work, i suspect at least some of them can also be
* tied to xpll...
*/
- info->amast = nv_rd32(dev, 0x00c040);
- info->pdivs = read_div(dev);
if (perflvl->vdec) {
/* see how close we can get using nvclk as a source */
clk = calc_div(perflvl->core, perflvl->vdec, &P1);
@@ -566,16 +652,14 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
out = calc_div(out, perflvl->vdec, &P2);
/* select whichever gets us closest */
- info->amast &= ~0x00000c00;
- info->pdivs &= ~0x00000700;
if (abs((int)perflvl->vdec - clk) <=
abs((int)perflvl->vdec - out)) {
if (dev_priv->chipset != 0x98)
- info->amast |= 0x00000c00;
- info->pdivs |= P1 << 8;
+ mast |= 0x00000c00;
+ divs |= P1 << 8;
} else {
- info->amast |= 0x00000800;
- info->pdivs |= P2 << 8;
+ mast |= 0x00000800;
+ divs |= P2 << 8;
}
}
@@ -583,21 +667,82 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
* of the host clock frequency
*/
if (perflvl->dom6) {
- info->amast &= ~0x0c000000;
if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
- info->amast |= 0x00000000;
+ mast |= 0x00000000;
} else
if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
- info->amast |= 0x08000000;
+ mast |= 0x08000000;
} else {
clk = read_clk(dev, clk_src_hclk) * 3;
clk = calc_div(clk, perflvl->dom6, &P1);
- info->amast |= 0x0c000000;
- info->pdivs = (info->pdivs & ~0x00000007) | P1;
+ mast |= 0x0c000000;
+ divs |= P1;
}
}
+ /* vdec/dom6: complete switch to new clocks */
+ switch (dev_priv->chipset) {
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ hwsq_wr32(hwsq, 0x004800, divs);
+ break;
+ default:
+ hwsq_wr32(hwsq, 0x004700, divs);
+ break;
+ }
+
+ hwsq_wr32(hwsq, 0x00c040, mast);
+
+ /* core/shader: make sure sclk/nvclk are disconnected from their
+ * PLLs (nvclk to dom6, sclk to hclk)
+ */
+ if (dev_priv->chipset < 0x92)
+ mast = (mast & ~0x001000b0) | 0x00100080;
+ else
+ mast = (mast & ~0x000000b3) | 0x00000081;
+
+ hwsq_wr32(hwsq, 0x00c040, mast);
+
+ /* core: for the moment at least, always use nvpll */
+ clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ ctrl = nv_rd32(dev, 0x004028) & ~0xc03f0100;
+ mast &= ~0x00100000;
+ mast |= 3;
+
+ hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
+ hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
+
+ /* shader: tie to nvclk if possible, otherwise use spll. have to be
+ * very careful that the shader clock is at least twice the core, or
+ * some chipsets will be very unhappy. i expect most or all of these
+ * cases will be handled by tying to nvclk, but it's possible there's
+ * corners
+ */
+ ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100;
+
+ if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+ hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
+ hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
+ } else {
+ clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+ ctrl |= 0x80000000;
+
+ hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
+ hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
+ hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
+ }
+
+ hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+ hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
+ hwsq_fini(hwsq);
+
return info;
error:
kfree(info);
@@ -605,23 +750,24 @@ error:
}
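
The engine-reclock path above now emits a HWSQ script instead of poking the clock registers directly: bus access is fenced off, vdec/dom6 are parked on safe sources, nvclk/sclk are detached from their PLLs, the new PLL control/coefficient words are written, and only then is everything reattached and bus access restored. The standalone sketch below models that ordering only: emit_flag()/emit_wr32() are hypothetical stand-ins for hwsq_setf()/hwsq_wr32(), the register offsets and mast bits are those visible in the hunk above (the chipset >= 0x92 variant), and the data values are placeholders.

    /* Minimal model of the engine-reclock script ordering; not the
     * nouveau hwsq API itself.  Placeholder data values throughout.
     */
    #include <stdio.h>

    static void emit_flag(int flag, int value)        { printf("flag 0x%02x <- %d\n", flag, value); }
    static void emit_wr32(unsigned reg, unsigned val) { printf("wr32 0x%06x <- 0x%08x\n", reg, val); }

    int main(void)
    {
    	unsigned mast = 0, divs = 0, ctrl = 0, coef = 0;   /* placeholder data */

    	emit_flag(0x10, 0);                     /* fence off bus access               */
    	emit_wr32(0x00c040, mast);              /* vdec/dom6 onto "safe" sources      */
    	emit_wr32(0x004700, divs);              /* post-dividers (0x4800 on 92/94/96) */
    	emit_wr32(0x00c040, mast);              /* complete the vdec/dom6 switch      */
    	mast = (mast & ~0x000000b3) | 0x00000081;
    	emit_wr32(0x00c040, mast);              /* detach nvclk/sclk from their PLLs  */
    	emit_wr32(0x004028, 0x80000000 | ctrl); /* nvpll control word                 */
    	emit_wr32(0x00402c, coef);              /* nvpll coefficients (N << 8 | M)    */
    	emit_wr32(0x00c040, 0x00000030 | mast); /* reattach core/shader               */
    	emit_flag(0x10, 1);                     /* re-enable bus access               */
    	return 0;
    }
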
static int
-prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
+prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 hwsq_data, hwsq_kick;
int i;
- if (dev_priv->chipset < 0x90) {
+ if (dev_priv->chipset < 0x94) {
hwsq_data = 0x001400;
hwsq_kick = 0x00000003;
} else {
hwsq_data = 0x080000;
hwsq_kick = 0x00000001;
}
-
/* upload hwsq ucode */
nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
nv_wr32(dev, 0x001304, 0x00000000);
+ if (dev_priv->chipset >= 0x92)
+ nv_wr32(dev, 0x001318, 0x00000000);
for (i = 0; i < hwsq->len / 4; i++)
nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
@@ -645,20 +791,19 @@ prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
int
nv50_pm_clocks_set(struct drm_device *dev, void *data)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_pm_state *info = data;
struct bit_entry M;
- int ret = 0;
+ int ret = -EBUSY;
/* halt and idle execution engines */
nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
- goto error;
+ goto resume;
+ if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f))
+ goto resume;
- /* memory: it is *very* important we change this first, the ucode
- * we build in pre() now has hardcoded 0xc040 values, which can't
- * change before we execute it or the engine clocks may end up
- * messed up.
+ /* program memory clock, if necessary - must come before engine clock
+ * reprogramming due to how we construct the hwsq scripts in pre()
*/
if (info->mclk_hwsq.len) {
/* execute some scripts that do ??? from the vbios.. */
@@ -672,42 +817,14 @@ nv50_pm_clocks_set(struct drm_device *dev, void *data)
nouveau_bios_init_exec(dev, info->mscript);
}
- ret = prog_mclk(dev, &info->mclk_hwsq);
+ ret = prog_hwsq(dev, &info->mclk_hwsq);
if (ret)
goto resume;
}
- /* reclock vdec/dom6 */
- nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
- switch (dev_priv->chipset) {
- case 0x92:
- case 0x94:
- case 0x96:
- nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
- break;
- default:
- nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
- break;
- }
- nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
+ /* program engine clocks */
+ ret = prog_hwsq(dev, &info->eclk_hwsq);
- /* core/shader: make sure sclk/nvclk are disconnected from their
- * plls (nvclk to dom6, sclk to hclk), modify the plls, and
- * reconnect sclk/nvclk to their new clock source
- */
- if (dev_priv->chipset < 0x92)
- nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */
- else
- nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
- nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
- nv_wr32(dev, 0x004024, info->scoef);
- nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
- nv_wr32(dev, 0x00402c, info->ncoef);
- nv_mask(dev, 0x00c040, 0x00100033, info->emast);
-
- goto resume;
-error:
- ret = -EBUSY;
resume:
nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
kfree(info);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c4423ba..a7844ab 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -36,6 +36,193 @@
#include "nouveau_crtc.h"
#include "nv50_display.h"
+static u32
+nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv50[] = { 16, 8, 0, 24 };
+ if (dev_priv->card_type == 0xaf)
+ return nvaf[lane];
+ return nv50[lane];
+}
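+
+/* nv50_sor_dp_lane_map() returns a bit shift per physical lane (with a
+ * different ordering on NVAF, per the comment).  Each lane owns one byte
+ * of the UNK118/UNK120 drive registers, and bit (shift >> 3) of the low
+ * nibble of UNK130 enables the lane, as nv50_sor_dp_link_set() does
+ * below.  A small standalone check of the masks that fall out of the
+ * nv50 table; everything here is derived from the shift table above.
+ *
+ *    #include <stdio.h>
+ *
+ *    static const unsigned char nv50_shift[] = { 16, 8, 0, 24 };
+ *
+ *    int main(void)
+ *    {
+ *    	for (int lane = 0; lane < 4; lane++) {
+ *    		unsigned shift = nv50_shift[lane];
+ *    		unsigned mask  = 0xffu << shift;        // byte for this lane
+ *    		unsigned en    = 1u << (shift >> 3);    // lane-enable bit
+ *    		printf("lane %d: byte mask 0x%08x, enable bit 0x%x\n",
+ *    		       lane, mask, en);
+ *    	}
+ *    	return 0;
+ *    }
+ */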
+
+static void
+nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
+{
+ u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
+}
+
+static void
+nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
+ u8 lane, u8 swing, u8 preem)
+{
+ u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
+ u32 mask = 0x000000ff << shift;
+ u8 *table, *entry, *config;
+
+ table = nouveau_dp_bios_data(dev, dcb, &entry);
+ if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
+ NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+ return;
+ }
+
+ config = entry + table[4];
+ while (config[0] != swing || config[1] != preem) {
+ config += table[5];
+ if (config >= entry + table[4] + entry[4] * table[5])
+ return;
+ }
+
+ nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
+ nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
+ nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
+}
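+
+/* nv50_sor_dp_train_adj() walks the VBIOS DP table for a record whose
+ * first two bytes match the requested voltage swing and pre-emphasis:
+ * per the code above, records of table[5] bytes start at entry + table[4]
+ * and there are entry[4] of them.  A minimal standalone model of that
+ * walk, using a made-up table purely to exercise the lookup; nothing
+ * about the byte layout beyond what the code above reads is assumed.
+ *
+ *    #include <stdio.h>
+ *    #include <stdint.h>
+ *
+ *    static const uint8_t *find_config(const uint8_t *table, const uint8_t *entry,
+ *    				      uint8_t swing, uint8_t preem)
+ *    {
+ *    	const uint8_t *config = entry + table[4];
+ *    	const uint8_t *end    = config + entry[4] * table[5];
+ *
+ *    	while (config < end) {
+ *    		if (config[0] == swing && config[1] == preem)
+ *    			return config;
+ *    		config += table[5];
+ *    	}
+ *    	return NULL;	// no match: caller leaves the registers untouched
+ *    }
+ *
+ *    int main(void)
+ *    {
+ *    	// fake data: records start at entry + 5, each 5 bytes, entry[4] = 2 of them
+ *    	static const uint8_t table[] = { 0x20, 0, 0, 0, 5, 5 };
+ *    	static const uint8_t entry[] = { 0, 0, 0, 0, 2,
+ *    					 0, 0, 0x11, 0x22, 0x33,
+ *    					 1, 0, 0x44, 0x55, 0x66 };
+ *    	const uint8_t *cfg = find_config(table, entry, 1, 0);
+ *
+ *    	if (cfg)
+ *    		printf("cfg bytes 0x%02x 0x%02x 0x%02x\n", cfg[2], cfg[3], cfg[4]);
+ *    	return 0;
+ *    }
+ */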
+
+static void
+nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
+ int link_nr, u32 link_bw, bool enhframe)
+{
+ u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
+ u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000;
+ u8 *table, *entry, mask;
+ int i;
+
+ table = nouveau_dp_bios_data(dev, dcb, &entry);
+ if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
+ NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+ return;
+ }
+
+ entry = ROMPTR(dev, entry[10]);
+ if (entry) {
+ while (link_bw < ROM16(entry[0]) * 10)
+ entry += 4;
+
+ nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
+ }
+
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (enhframe)
+ dpctrl |= 0x00004000;
+
+ if (link_bw > 162000)
+ clksor |= 0x00040000;
+
+ nv_wr32(dev, 0x614300 + (or * 0x800), clksor);
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl);
+
+ mask = 0;
+ for (i = 0; i < link_nr; i++)
+ mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
+ nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
+}
+
+static void
+nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
+{
+ u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
+ u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800));
+ if (clksor & 0x000c0000)
+ *bw = 270000;
+ else
+ *bw = 162000;
+
+ if (dpctrl > 0x00030000) *nr = 4;
+ else if (dpctrl > 0x00010000) *nr = 2;
+ else *nr = 1;
+}
+
+void
+nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
+{
+ const u32 symbol = 100000;
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, r;
+
+ /* calculate packed data rate for each lane */
+ nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
+ link_data_rate = (clk * bpp / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
+
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (!bestTU) {
+ NV_ERROR(dev, "DP: unable to find suitable config\n");
+ return;
+ }
+
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
+ nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 |
+ unk);
+}
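
nv50_sor_dp_calc_tu() searches TU sizes from 64 down to 32 for the value whose integer-plus-fraction hardware encoding (VTUi, VTUf, VTUa) best matches the share of link symbols that must carry pixel data. Below is a worked example of the quantities being matched, using an illustrative 148.5 MHz, 24 bpp, 4-lane, 2.7 Gbps link; "symbol" is the same 100000 fixed-point scale factor the function uses.

    /* Worked example only; link parameters are illustrative. */
    #include <stdio.h>

    int main(void)
    {
    	const unsigned symbol = 100000;
    	unsigned clk = 148500, bpp = 24, link_nr = 4, link_bw = 270000, TU = 64;

    	unsigned long long link_data_rate = ((unsigned long long)clk * bpp / 8) / link_nr; /* 111375 */
    	unsigned long long link_ratio = link_data_rate * symbol / link_bw;                 /* 41250  */

    	unsigned long long tu_valid = link_ratio * TU;                /* scaled data symbols per TU */
    	unsigned VTUi = tu_valid / symbol;                            /* integer part: 26           */
    	unsigned frac = tu_valid - (unsigned long long)VTUi * symbol; /* remainder: 40000           */

    	printf("ratio %llu/%u: a TU of %u carries %u + %u/%u data symbols\n",
    	       link_ratio, symbol, TU, VTUi, frac, symbol);
    	return 0;
    }
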
static void
nv50_sor_disconnect(struct drm_encoder *encoder)
{
@@ -117,20 +304,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
if (nv_encoder->dcb->type == OUTPUT_DP) {
- struct nouveau_i2c_chan *auxch;
-
- auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!auxch)
- return;
+ struct dp_train_func func = {
+ .link_set = nv50_sor_dp_link_set,
+ .train_set = nv50_sor_dp_train_set,
+ .train_adj = nv50_sor_dp_train_adj
+ };
- if (mode == DRM_MODE_DPMS_ON) {
- u8 status = DP_SET_POWER_D0;
- nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
- nouveau_dp_link_train(encoder, nv_encoder->dp.datarate);
- } else {
- u8 status = DP_SET_POWER_D3;
- nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
- }
+ nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
}
}
@@ -162,11 +342,8 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *connector->native_mode;
- adjusted_mode->base.id = id;
- }
+ connector->native_mode)
+ drm_mode_copy(adjusted_mode, connector->native_mode);
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 6f38cea..44fbac9 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -57,27 +57,15 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
- struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
-
phys |= 1; /* present */
phys |= (u64)memtype << 40;
-
- /* IGPs don't have real VRAM, re-target to stolen system memory */
- if (target == 0 && dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- target = 3;
- }
-
phys |= target << 4;
-
if (vma->access & NV_MEM_ACCESS_SYS)
phys |= (1 << 6);
-
if (!(vma->access & NV_MEM_ACCESS_WO))
phys |= (1 << 3);
-
return phys;
}
@@ -85,11 +73,19 @@ void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
u32 comp = (mem->memtype & 0x180) >> 7;
- u32 block;
+ u32 block, target;
int i;
- phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
+ /* IGPs don't have real VRAM, re-target to stolen system memory */
+ target = 0;
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
+ phys = vm_addr(vma, phys, mem->memtype, target);
pte <<= 3;
cnt <<= 3;
@@ -125,9 +121,10 @@ void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
+ u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
pte <<= 3;
while (cnt--) {
- u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
+ u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
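
The refactor above moves target selection out of the address helper: nv50_vm_map() retargets IGP "VRAM" to stolen system memory (target 3) itself, and nv50_vm_map_sg() now picks target 3 for NV_MEM_ACCESS_NOSNOOP mappings and 2 otherwise. The standalone model below shows the PTE bits vm_addr() assembles; the sample address and memtype are illustrative, and the bit meanings are only those visible in the code above.

    /* Minimal model of the nv50 PTE assembled by vm_addr(); not kernel code. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t model_vm_addr(uint64_t phys, unsigned memtype, unsigned target,
    			      int sys_access, int write_ok)
    {
    	phys |= 1;                        /* present                                  */
    	phys |= (uint64_t)memtype << 40;  /* storage type                             */
    	phys |= (uint64_t)target << 4;    /* 2 = sg default, 3 = NOSNOOP / IGP stolen */
    	if (sys_access)
    		phys |= 1 << 6;           /* NV_MEM_ACCESS_SYS                        */
    	if (!write_ok)
    		phys |= 1 << 3;           /* read-only                                */
    	return phys;
    }

    int main(void)
    {
    	uint64_t pte = model_vm_addr(0x12345000ull, 0x00, 2, 0, 1);
    	printf("pte: lo 0x%08x hi 0x%08x\n",
    	       (unsigned)pte, (unsigned)(pte >> 32));
    	return 0;
    }
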
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 2e45e57..9ed9ae39 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -189,8 +189,25 @@ nv50_vram_init(struct drm_device *dev)
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 pfb714 = nv_rd32(dev, 0x100714);
u32 rblock, length;
+ switch (pfb714 & 0x00000007) {
+ case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR3;
+ else
+ dev_priv->vram_type = NV_MEM_TYPE_DDR2;
+ break;
+ case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
+ case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
+ default:
+ break;
+ }
+
+ dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
dev_priv->vram_size = nv_rd32(dev, 0x10020c);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ULL;
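
nv50_vram_init() now derives the memory type from the low three bits of PFB register 0x100714, with value 1 disambiguated between DDR2 and DDR3 via the VBIOS. A minimal standalone decode of that switch; the VBIOS cross-check is reduced to a boolean parameter here purely for illustration.

    #include <stdio.h>

    static const char *nv50_vram_type(unsigned pfb714, int vbios_says_ddr3)
    {
    	switch (pfb714 & 0x00000007) {
    	case 0: return "DDR1";
    	case 1: return vbios_says_ddr3 ? "DDR3" : "DDR2";
    	case 2: return "GDDR3";
    	case 3: return "GDDR4";
    	case 4: return "GDDR5";
    	default: return "unknown";
    	}
    }

    int main(void)
    {
    	printf("%s\n", nv50_vram_type(0x00000002, 0)); /* GDDR3 */
    	return 0;
    }
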
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index e9992f6..ce65f81 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -269,7 +269,7 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
clk0 = calc_div(dev, clk, clk0, freq, &div1D);
/* see if we can get any closer using PLLs */
- if (clk0 != freq) {
+ if (clk0 != freq && (0x00004387 & (1 << clk))) {
if (clk < 7)
clk1 = calc_pll(dev, clk, freq, &info->coef);
else
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 9e35294..30d2bd5 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -77,9 +77,11 @@ void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
+ u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
+
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index ce984d5..a7eef89 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -106,31 +106,32 @@ nvc0_vram_init(struct drm_device *dev)
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(dev, 0x121c74);
+ u32 parts = nv_rd32(dev, 0x022438);
+ u32 pmask = nv_rd32(dev, 0x022554);
u32 bsize = nv_rd32(dev, 0x10f20c);
u32 offset, length;
bool uniform = true;
int ret, part;
NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
- NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
+ NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+ dev_priv->vram_type = nouveau_mem_vbios_type(dev);
+ dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
/* read amount of vram attached to each memory controller */
- part = 0;
- while (parts) {
- u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
- if (psize == 0)
- continue;
- parts--;
-
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
+ for (part = 0; part < parts; part++) {
+ if (!(pmask & (1 << part))) {
+ u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
+ dev_priv->vram_size += (u64)psize << 20;
}
-
- NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
- dev_priv->vram_size += (u64)psize << 20;
}
/* if all controllers have the same amount attached, there's no holes */
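
The nvc0 partition scan now takes a partition count from 0x022438 and a disable mask from 0x022554, summing only partitions whose mask bit is clear and tracking whether all enabled partitions report the same size. A standalone model of that loop with illustrative per-partition sizes in MiB (matching the psize << 20 scaling above).

    #include <stdio.h>

    int main(void)
    {
    	unsigned parts = 4;                        /* from 0x022438              */
    	unsigned pmask = 0x00000004;               /* from 0x022554, part 2 off  */
    	unsigned psize[4] = { 256, 256, 0, 256 };  /* per-partition MiB          */
    	unsigned bsize = 256;                      /* broadcast amount, 0x10f20c */
    	unsigned long long vram_size = 0;
    	int uniform = 1;

    	for (unsigned part = 0; part < parts; part++) {
    		if (pmask & (1u << part))
    			continue;                  /* disabled partition         */
    		if (psize[part] != bsize) {
    			if (psize[part] < bsize)
    				bsize = psize[part];
    			uniform = 0;
    		}
    		vram_size += (unsigned long long)psize[part] << 20;
    	}
    	printf("vram %llu MiB, uniform=%d\n", vram_size >> 20, uniform);
    	return 0;
    }
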
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index d2ba2f0..dfb8a95 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -284,6 +284,8 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
u32 *push;
int ret;
+ evo_sync(crtc->dev, EVO_MASTER);
+
swap_interval <<= 4;
if (swap_interval == 0)
swap_interval |= 0x100;
@@ -593,7 +595,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
evo_kick(push, crtc->dev, EVO_MASTER);
}
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
+ nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
@@ -634,8 +636,7 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
u32 vblan2e = 0, vblan2s = 1;
- u32 magic = 0x31ec6000;
- u32 syncs, *push;
+ u32 *push;
int ret;
hactive = mode->htotal;
@@ -655,15 +656,8 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
vblan2e = vactive + vsynce + vbackp;
vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
vactive = (vactive * 2) + 1;
- magic |= 0x00000001;
}
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
ret = nvd0_crtc_swap_fbs(crtc, old_fb);
if (ret)
return ret;
@@ -683,9 +677,6 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
evo_data(push, mode->clock * 1000);
evo_data(push, 0x00200000); /* ??? */
evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
evo_data(push, 0x00000311);
evo_data(push, 0x00000100);
@@ -959,11 +950,6 @@ nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
static void
-nvd0_dac_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void
nvd0_dac_commit(struct drm_encoder *encoder)
{
}
@@ -974,13 +960,26 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- u32 *push;
+ u32 syncs, magic, *push;
+
+ syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ magic = 0x31ec6000 | (nv_crtc->index << 25);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, EVO_MASTER, 4);
+ push = evo_wait(encoder->dev, EVO_MASTER, 8);
if (push) {
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs);
+ evo_data(push, magic);
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
evo_data(push, 1 << nv_crtc->index);
evo_data(push, 0x00ff);
evo_kick(push, encoder->dev, EVO_MASTER);
@@ -1043,7 +1042,7 @@ nvd0_dac_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
.dpms = nvd0_dac_dpms,
.mode_fixup = nvd0_dac_mode_fixup,
- .prepare = nvd0_dac_prepare,
+ .prepare = nvd0_dac_disconnect,
.commit = nvd0_dac_commit,
.mode_set = nvd0_dac_mode_set,
.disable = nvd0_dac_disconnect,
@@ -1183,6 +1182,143 @@ nvd0_hdmi_disconnect(struct drm_encoder *encoder)
/******************************************************************************
* SOR
*****************************************************************************/
+static inline u32
+nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
+{
+ static const u8 nvd0[] = { 16, 8, 0, 24 };
+ return nvd0[lane];
+}
+
+static void
+nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
+{
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+}
+
+static void
+nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
+ u8 lane, u8 swing, u8 preem)
+{
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
+ u32 mask = 0x000000ff << shift;
+ u8 *table, *entry, *config = NULL;
+
+ switch (swing) {
+ case 0: preem += 0; break;
+ case 1: preem += 4; break;
+ case 2: preem += 7; break;
+ case 3: preem += 9; break;
+ }
+
+ table = nouveau_dp_bios_data(dev, dcb, &entry);
+ if (table) {
+ if (table[0] == 0x30) {
+ config = entry + table[4];
+ config += table[5] * preem;
+ }
+ }
+
+ if (!config) {
+ NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+ return;
+ }
+
+ nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift);
+ nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift);
+ nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
+ nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000);
+}
+
+static void
+nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
+ int link_nr, u32 link_bw, bool enhframe)
+{
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000;
+ u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000;
+ u32 script = 0x0000, lane_mask = 0;
+ u8 *table, *entry;
+ int i;
+
+ link_bw /= 27000;
+
+ table = nouveau_dp_bios_data(dev, dcb, &entry);
+ if (table) {
+ if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
+ else entry = NULL;
+
+ while (entry) {
+ if (entry[0] >= link_bw)
+ break;
+ entry += 3;
+ }
+
+ nouveau_bios_run_init_table(dev, script, dcb, crtc);
+ }
+
+ clksor |= link_bw << 18;
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (enhframe)
+ dpctrl |= 0x00004000;
+
+ for (i = 0; i < link_nr; i++)
+ lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
+
+ nv_wr32(dev, 0x612300 + soff, clksor);
+ nv_wr32(dev, 0x61c10c + loff, dpctrl);
+ nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask);
+}
+
+static void
+nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
+ u32 *link_nr, u32 *link_bw)
+{
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(dev, 0x612300 + soff);
+
+ if (dpctrl > 0x00030000) *link_nr = 4;
+ else if (dpctrl > 0x00010000) *link_nr = 2;
+ else *link_nr = 1;
+
+ *link_bw = (clksor & 0x007c0000) >> 18;
+ *link_bw *= 27000;
+}
+
+static void
+nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
+ u32 crtc, u32 datarate)
+{
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 link_nr, link_bw;
+ u64 ratio, value;
+
+ nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
+
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nv_wr32(dev, 0x616610 + (crtc * 0x800), value);
+}
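+
+/* nvd0_sor_dp_calc_tu() reduces the nv50 search to a single closed-form
+ * watermark: ((symbol - ratio) * 64 * ratio) / symbol / symbol, plus 5,
+ * with bit 27 set, written per head to 0x616610.  A worked example with
+ * the same illustrative link as before (445500 datarate units over four
+ * lanes at 2.7 Gbps); all divisions are integer, as with the do_div()
+ * calls above.
+ *
+ *    #include <stdio.h>
+ *
+ *    int main(void)
+ *    {
+ *    	const unsigned long long symbol = 100000, TU = 64;
+ *    	unsigned long long datarate = 445500, link_nr = 4, link_bw = 270000;
+ *
+ *    	unsigned long long ratio = datarate * symbol / (link_nr * link_bw); // 41250
+ *    	unsigned long long value = (symbol - ratio) * TU;                   // 3760000
+ *    	value = value * ratio / symbol / symbol;                            // 15
+ *    	value += 5;
+ *    	value |= 0x08000000;
+ *
+ *    	printf("0x616610 watermark word: 0x%08llx\n", value);               // 0x08000014
+ *    	return 0;
+ *    }
+ */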
+
static void
nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
{
@@ -1215,6 +1351,16 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ struct dp_train_func func = {
+ .link_set = nvd0_sor_dp_link_set,
+ .train_set = nvd0_sor_dp_train_set,
+ .train_adj = nvd0_sor_dp_train_adj
+ };
+
+ nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
+ }
}
static bool
@@ -1237,8 +1383,37 @@ nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
static void
+nvd0_sor_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nvd0_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(dev, EVO_MASTER, 4);
+ if (push) {
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, EVO_MASTER);
+ }
+
+ nvd0_hdmi_disconnect(encoder);
+
+ nv_encoder->crtc = NULL;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ }
+}
+
+static void
nvd0_sor_prepare(struct drm_encoder *encoder)
{
+ nvd0_sor_disconnect(encoder);
+ if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP)
+ evo_sync(encoder->dev, EVO_MASTER);
}
static void
@@ -1257,7 +1432,18 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct nouveau_connector *nv_connector;
struct nvbios *bios = &dev_priv->vbios;
u32 mode_ctrl = (1 << nv_crtc->index);
- u32 *push, or_config;
+ u32 syncs, magic, *push;
+ u32 or_config;
+
+ syncs = 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ magic = 0x31ec6000 | (nv_crtc->index << 25);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
switch (nv_encoder->dcb->type) {
@@ -1306,6 +1492,22 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
}
break;
+ case OUTPUT_DP:
+ if (nv_connector->base.display_info.bpc == 6) {
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ syncs |= 0x00000140;
+ } else {
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ syncs |= 0x00000180;
+ }
+
+ if (nv_encoder->dcb->sorconf.link & 1)
+ mode_ctrl |= 0x00000800;
+ else
+ mode_ctrl |= 0x00000900;
+
+ or_config = (mode_ctrl & 0x00000f00) >> 8;
+ break;
default:
BUG_ON(1);
break;
@@ -1313,9 +1515,17 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(dev, EVO_MASTER, 4);
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
+ nv_encoder->dp.datarate);
+ }
+
+ push = evo_wait(dev, EVO_MASTER, 8);
if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs);
+ evo_data(push, magic);
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
evo_data(push, mode_ctrl);
evo_data(push, or_config);
evo_kick(push, dev, EVO_MASTER);
@@ -1325,32 +1535,6 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
}
static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nvd0_hdmi_disconnect(encoder);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- }
-}
-
-static void
nvd0_sor_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -1402,17 +1586,19 @@ static struct dcb_entry *
lookup_dcb(struct drm_device *dev, int id, u32 mc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int type, or, i;
+ int type, or, i, link = -1;
if (id < 4) {
type = OUTPUT_ANALOG;
or = id;
} else {
switch (mc & 0x00000f00) {
- case 0x00000000: type = OUTPUT_LVDS; break;
- case 0x00000100: type = OUTPUT_TMDS; break;
- case 0x00000200: type = OUTPUT_TMDS; break;
- case 0x00000500: type = OUTPUT_TMDS; break;
+ case 0x00000000: link = 0; type = OUTPUT_LVDS; break;
+ case 0x00000100: link = 0; type = OUTPUT_TMDS; break;
+ case 0x00000200: link = 1; type = OUTPUT_TMDS; break;
+ case 0x00000500: link = 0; type = OUTPUT_TMDS; break;
+ case 0x00000800: link = 0; type = OUTPUT_DP; break;
+ case 0x00000900: link = 1; type = OUTPUT_DP; break;
default:
NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
return NULL;
@@ -1423,7 +1609,8 @@ lookup_dcb(struct drm_device *dev, int id, u32 mc)
for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)))
+ if (dcb->type == type && (dcb->or & (1 << or)) &&
+ (link < 0 || link == !(dcb->sorconf.link & 1)))
return dcb;
}
@@ -1498,6 +1685,7 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
break;
case OUTPUT_TMDS:
case OUTPUT_LVDS:
+ case OUTPUT_DP:
if (cfg & 0x00000100)
tmp = 0x00000101;
else
@@ -1548,7 +1736,7 @@ nvd0_display_bh(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
struct nvd0_display *disp = nvd0_display(dev);
- u32 mask, crtc;
+ u32 mask = 0, crtc = ~0;
int i;
if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
@@ -1564,12 +1752,8 @@ nvd0_display_bh(unsigned long data)
}
}
- mask = nv_rd32(dev, 0x6101d4);
- crtc = 0;
- if (!mask) {
- mask = nv_rd32(dev, 0x6109d4);
- crtc = 1;
- }
+ while (!mask && ++crtc < dev->mode_config.num_crtc)
+ mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800));
if (disp->modeset & 0x00000001)
nvd0_display_unk1_handler(dev, crtc, mask);
@@ -1584,6 +1768,7 @@ nvd0_display_intr(struct drm_device *dev)
{
struct nvd0_display *disp = nvd0_display(dev);
u32 intr = nv_rd32(dev, 0x610088);
+ int i;
if (intr & 0x00000001) {
u32 stat = nv_rd32(dev, 0x61008c);
@@ -1628,16 +1813,13 @@ nvd0_display_intr(struct drm_device *dev)
intr &= ~0x00100000;
}
- if (intr & 0x01000000) {
- u32 stat = nv_rd32(dev, 0x6100bc);
- nv_wr32(dev, 0x6100bc, stat);
- intr &= ~0x01000000;
- }
-
- if (intr & 0x02000000) {
- u32 stat = nv_rd32(dev, 0x6108bc);
- nv_wr32(dev, 0x6108bc, stat);
- intr &= ~0x02000000;
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ u32 mask = 0x01000000 << i;
+ if (intr & mask) {
+ u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
+ nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
+ intr &= ~mask;
+ }
}
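
The interrupt and bottom-half paths above stop hard-coding two heads and instead walk dev->mode_config.num_crtc, relying on the 0x800 per-head register stride (0x6100bc, 0x6101d4, and so on). A trivial standalone check of the addresses such a loop touches; the head count of four is illustrative only.

    #include <stdio.h>

    int main(void)
    {
    	int num_crtc = 4;              /* illustrative; read from hw on nvd0 */
    	for (int i = 0; i < num_crtc; i++)
    		printf("head %d: intr status 0x%06x  bh mask 0x%06x\n",
    		       i, 0x6100bc + i * 0x800, 0x6101d4 + i * 0x800);
    	return 0;
    }
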
if (intr)
@@ -1774,7 +1956,7 @@ nvd0_display_create(struct drm_device *dev)
struct pci_dev *pdev = dev->pdev;
struct nvd0_display *disp;
struct dcb_entry *dcbe;
- int ret, i;
+ int crtcs, ret, i;
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
@@ -1782,7 +1964,8 @@ nvd0_display_create(struct drm_device *dev)
dev_priv->engine.display.priv = disp;
/* create crtc objects to represent the hw heads */
- for (i = 0; i < 2; i++) {
+ crtcs = nv_rd32(dev, 0x022448);
+ for (i = 0; i < crtcs; i++) {
ret = nvd0_crtc_create(dev, i);
if (ret)
goto out;
@@ -1803,6 +1986,7 @@ nvd0_display_create(struct drm_device *dev)
switch (dcbe->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
+ case OUTPUT_DP:
nvd0_sor_create(connector, dcbe);
break;
case OUTPUT_ANALOG:
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 6a5f439..88718fa 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -85,6 +85,7 @@ static struct drm_driver driver = {
int r128_driver_load(struct drm_device *dev, unsigned long flags)
{
+ pci_set_master(dev->pdev);
return drm_vblank_init(dev, 1);
}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 2139fe8..8410415 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -71,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
- radeon_semaphore.o radeon_sa.o
+ radeon_semaphore.o radeon_sa.o atombios_i2c.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 742f17f..72672ea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1031,6 +1031,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ unsigned bankw, bankh, mtaspect, tile_split;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
@@ -1121,20 +1122,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
break;
}
- switch ((tmp & 0xf000) >> 12) {
- case 0: /* 1KB rows */
- default:
- fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
- break;
- case 1: /* 2KB rows */
- fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
- break;
- case 2: /* 4KB rows */
- fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
- break;
- }
-
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+ fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+ fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+ fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 552b436..191218a 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -746,7 +746,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
- if (dp_info->dpcd[0] >= 0x11)
+ if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
+ dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
new file mode 100644
index 0000000..44d87b6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ *
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+#define TARGET_HW_I2C_CLOCK 50
+
+/* these are limitations of ProcessI2cChannelTransaction, not the hw */
+#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_READ 255
+
+static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+ u8 slave_addr, u8 flags,
+ u8 *buf, u8 num)
+{
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+ unsigned char *base;
+ u16 out;
+
+ memset(&args, 0, sizeof(args));
+
+ base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+ if (flags & HW_I2C_WRITE) {
+ if (num > ATOM_MAX_HW_I2C_WRITE) {
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+ return -EINVAL;
+ }
+ memcpy(&out, buf, num);
+ args.lpI2CDataOut = cpu_to_le16(out);
+ } else {
+ if (num > ATOM_MAX_HW_I2C_READ) {
+ DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
+ return -EINVAL;
+ }
+ }
+
+ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucRegIndex = 0;
+ args.ucTransBytes = num;
+ args.ucSlaveAddr = slave_addr << 1;
+ args.ucLineNumber = chan->rec.i2c_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* error */
+ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("hw_i2c error\n");
+ return -EIO;
+ }
+
+ if (!(flags & HW_I2C_WRITE))
+ memcpy(buf, base, num);
+
+ return 0;
+}
+
+int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct i2c_msg *p;
+ int i, remaining, current_count, buffer_offset, max_bytes, ret;
+ u8 buf = 0, flags;
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ ret = radeon_process_i2c_ch(i2c,
+ p->addr, HW_I2C_WRITE,
+ &buf, 1);
+ if (ret)
+ return ret;
+ else
+ return num;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ remaining = p->len;
+ buffer_offset = 0;
+ /* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
+ if (p->flags & I2C_M_RD) {
+ max_bytes = ATOM_MAX_HW_I2C_READ;
+ flags = HW_I2C_READ;
+ } else {
+ max_bytes = ATOM_MAX_HW_I2C_WRITE;
+ flags = HW_I2C_WRITE;
+ }
+ while (remaining) {
+ if (remaining > max_bytes)
+ current_count = max_bytes;
+ else
+ current_count = remaining;
+ ret = radeon_process_i2c_ch(i2c,
+ p->addr, flags,
+ &p->buf[buffer_offset], current_count);
+ if (ret)
+ return ret;
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ }
+
+ return num;
+}
+
+u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
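+/* radeon_atom_hw_i2c_xfer() splits each i2c message into pieces that
+ * ProcessI2cChannelTransaction can take: at most 2 bytes per write and
+ * 255 per read, per the ATOM_MAX_HW_I2C_* limits above.  A standalone
+ * illustration of the resulting transaction counts; the message lengths
+ * are made up.
+ *
+ *    #include <stdio.h>
+ *
+ *    #define ATOM_MAX_HW_I2C_WRITE 2
+ *    #define ATOM_MAX_HW_I2C_READ  255
+ *
+ *    static int num_chunks(int len, int is_read)
+ *    {
+ *    	int max_bytes = is_read ? ATOM_MAX_HW_I2C_READ : ATOM_MAX_HW_I2C_WRITE;
+ *    	int chunks = 0;
+ *
+ *    	while (len > 0) {		// mirrors the while (remaining) loop
+ *    		len -= (len > max_bytes) ? max_bytes : len;
+ *    		chunks++;
+ *    	}
+ *    	return chunks;
+ *    }
+ *
+ *    int main(void)
+ *    {
+ *    	printf("16-byte write -> %d transactions\n", num_chunks(16, 0));  // 8
+ *    	printf("512-byte read -> %d transactions\n", num_chunks(512, 1)); // 3
+ *    	return 0;
+ *    }
+ */
+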
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f58254a..466db41 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -43,6 +43,37 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
+void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+ unsigned *bankh, unsigned *mtaspect,
+ unsigned *tile_split)
+{
+ *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+ *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+ *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+ *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+ switch (*bankw) {
+ default:
+ case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
+ case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
+ case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
+ case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
+ }
+ switch (*bankh) {
+ default:
+ case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
+ case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
+ case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
+ case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
+ }
+ switch (*mtaspect) {
+ default:
+ case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
+ case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
+ case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
+ case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
+ }
+}
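+
+/* evergreen_tiling_fields() unpacks the bank width/height, macro-tile
+ * aspect and tile split encoded in the tiling flags and converts the raw
+ * power-of-two values to the EVERGREEN_ADDR_SURF_* constants that
+ * dce4_crtc_do_set_base() (earlier in this patch) feeds into fb_format.
+ * The sketch below only models the shape of that conversion as a log2 of
+ * the raw value; the numeric values of the enum constants are not
+ * assumed here.
+ *
+ *    #include <stdio.h>
+ *
+ *    static int surf_index(unsigned v)   // 1->0, 2->1, 4->2, 8->3
+ *    {
+ *    	int i = 0;
+ *    	while (v > 1) {
+ *    		v >>= 1;
+ *    		i++;
+ *    	}
+ *    	return i;
+ *    }
+ *
+ *    int main(void)
+ *    {
+ *    	for (unsigned v = 1; v <= 8; v <<= 1)
+ *    		printf("bankw/bankh/mtaspect %u -> index %d\n", v, surf_index(v));
+ *    	return 0;
+ *    }
+ */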
+
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
u16 ctl, v;
@@ -68,6 +99,25 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
+void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ int i;
+
+ if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
+ break;
+ udelay(1);
+ }
+ }
+}
+
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -1489,7 +1539,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
evergreen_cp_start(rdev);
ring->ready = true;
- r = radeon_ring_test(rdev, ring);
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
ring->ready = false;
return r;
@@ -3147,7 +3197,7 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
+ rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -3187,7 +3237,7 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2379849..4e83fdc 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -32,17 +32,7 @@
#include "evergreend.h"
#include "evergreen_blit_shaders.h"
#include "cayman_blit_shaders.h"
-
-#define DI_PT_RECTLIST 0x11
-#define DI_INDEX_SIZE_16_BIT 0x0
-#define DI_SRC_SEL_AUTO_INDEX 0x2
-
-#define FMT_8 0x1
-#define FMT_5_6_5 0x8
-#define FMT_8_8_8_8 0x1a
-#define COLOR_8 0x1
-#define COLOR_5_6_5 0x8
-#define COLOR_8_8_8_8 0x1a
+#include "radeon_blit_common.h"
/* emits 17 */
static void
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 8e8cd85..a58b37a 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -31,6 +31,9 @@
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#define MIN(a,b) (((a)<(b))?(a):(b))
+
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -40,42 +43,43 @@ struct evergreen_cs_track {
u32 npipes;
u32 row_size;
/* value we track */
- u32 nsamples;
- u32 cb_color_base_last[12];
+ u32 nsamples; /* unused */
struct radeon_bo *cb_color_bo[12];
u32 cb_color_bo_offset[12];
- struct radeon_bo *cb_color_fmask_bo[8];
- struct radeon_bo *cb_color_cmask_bo[8];
+ struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
+ struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
u32 cb_color_info[12];
u32 cb_color_view[12];
- u32 cb_color_pitch_idx[12];
- u32 cb_color_slice_idx[12];
- u32 cb_color_dim_idx[12];
- u32 cb_color_dim[12];
u32 cb_color_pitch[12];
u32 cb_color_slice[12];
- u32 cb_color_cmask_slice[8];
- u32 cb_color_fmask_slice[8];
+ u32 cb_color_attrib[12];
+ u32 cb_color_cmask_slice[8];/* unused */
+ u32 cb_color_fmask_slice[8];/* unused */
u32 cb_target_mask;
- u32 cb_shader_mask;
+ u32 cb_shader_mask; /* unused */
u32 vgt_strmout_config;
u32 vgt_strmout_buffer_config;
+ struct radeon_bo *vgt_strmout_bo[4];
+ u32 vgt_strmout_bo_offset[4];
+ u32 vgt_strmout_size[4];
u32 db_depth_control;
u32 db_depth_view;
+ u32 db_depth_slice;
u32 db_depth_size;
- u32 db_depth_size_idx;
u32 db_z_info;
- u32 db_z_idx;
u32 db_z_read_offset;
u32 db_z_write_offset;
struct radeon_bo *db_z_read_bo;
struct radeon_bo *db_z_write_bo;
u32 db_s_info;
- u32 db_s_idx;
u32 db_s_read_offset;
u32 db_s_write_offset;
struct radeon_bo *db_s_read_bo;
struct radeon_bo *db_s_write_bo;
+ bool sx_misc_kill_all_prims;
+ bool cb_dirty;
+ bool db_dirty;
+ bool streamout_dirty;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -103,19 +107,6 @@ static u32 evergreen_cs_get_num_banks(u32 nbanks)
}
}
-static u32 evergreen_cs_get_tile_split(u32 row_size)
-{
- switch (row_size) {
- case 1:
- default:
- return ADDR_SURF_TILE_SPLIT_1KB;
- case 2:
- return ADDR_SURF_TILE_SPLIT_2KB;
- case 4:
- return ADDR_SURF_TILE_SPLIT_4KB;
- }
-}
-
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -128,50 +119,745 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
}
for (i = 0; i < 12; i++) {
- track->cb_color_base_last[i] = 0;
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
track->cb_color_info[i] = 0;
- track->cb_color_view[i] = 0;
- track->cb_color_pitch_idx[i] = 0;
- track->cb_color_slice_idx[i] = 0;
- track->cb_color_dim[i] = 0;
+ track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_pitch[i] = 0;
track->cb_color_slice[i] = 0;
- track->cb_color_dim[i] = 0;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
+ track->cb_dirty = true;
track->db_depth_view = 0xFFFFC000;
track->db_depth_size = 0xFFFFFFFF;
- track->db_depth_size_idx = 0;
track->db_depth_control = 0xFFFFFFFF;
track->db_z_info = 0xFFFFFFFF;
- track->db_z_idx = 0xFFFFFFFF;
track->db_z_read_offset = 0xFFFFFFFF;
track->db_z_write_offset = 0xFFFFFFFF;
track->db_z_read_bo = NULL;
track->db_z_write_bo = NULL;
track->db_s_info = 0xFFFFFFFF;
- track->db_s_idx = 0xFFFFFFFF;
track->db_s_read_offset = 0xFFFFFFFF;
track->db_s_write_offset = 0xFFFFFFFF;
track->db_s_read_bo = NULL;
track->db_s_write_bo = NULL;
+ track->db_dirty = true;
+
+ for (i = 0; i < 4; i++) {
+ track->vgt_strmout_size[i] = 0;
+ track->vgt_strmout_bo[i] = NULL;
+ track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+ }
+ track->streamout_dirty = true;
+ track->sx_misc_kill_all_prims = false;
}
-static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+struct eg_surface {
+ /* value gathered from cs */
+ unsigned nbx;
+ unsigned nby;
+ unsigned format;
+ unsigned mode;
+ unsigned nbanks;
+ unsigned bankw;
+ unsigned bankh;
+ unsigned tsplit;
+ unsigned mtilea;
+ unsigned nsamples;
+ /* output value */
+ unsigned bpe;
+ unsigned layer_size;
+ unsigned palign;
+ unsigned halign;
+ unsigned long base_align;
+};
+
+static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+ surf->base_align = surf->bpe;
+ surf->palign = 1;
+ surf->halign = 1;
+ return 0;
+}
+
+static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
{
struct evergreen_cs_track *track = p->track;
+ unsigned palign;
- /* we don't support stream out buffer yet */
- if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
- dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ palign = MAX(64, track->group_size / surf->bpe);
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+ surf->base_align = track->group_size;
+ surf->palign = palign;
+ surf->halign = 1;
+ if (surf->nbx & (palign - 1)) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+ __func__, __LINE__, prefix, surf->nbx, palign);
+ }
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned palign;
+
+ palign = track->group_size / (8 * surf->bpe * surf->nsamples);
+ palign = MAX(8, palign);
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe;
+ surf->base_align = track->group_size;
+ surf->palign = palign;
+ surf->halign = 8;
+ if ((surf->nbx & (palign - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
+ __func__, __LINE__, prefix, surf->nbx, palign,
+ track->group_size, surf->bpe, surf->nsamples);
+ }
+ return -EINVAL;
+ }
+ if ((surf->nby & (8 - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
+ __func__, __LINE__, prefix, surf->nby);
+ }
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned palign, halign, tileb, slice_pt;
+
+ tileb = 64 * surf->bpe * surf->nsamples;
+ palign = track->group_size / (8 * surf->bpe * surf->nsamples);
+ palign = MAX(8, palign);
+ slice_pt = 1;
+ if (tileb > surf->tsplit) {
+ slice_pt = tileb / surf->tsplit;
+ }
+ tileb = tileb / slice_pt;
+ /* macro tile width & height */
+ palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
+ halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+ surf->base_align = (palign / 8) * (halign / 8) * tileb;
+ surf->palign = palign;
+ surf->halign = halign;
+
+ if ((surf->nbx & (palign - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+ __func__, __LINE__, prefix, surf->nbx, palign);
+ }
+ return -EINVAL;
+ }
+ if ((surf->nby & (halign - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
+ __func__, __LINE__, prefix, surf->nby, halign);
+ }
return -EINVAL;
}
- /* XXX fill in */
+ return 0;
+}
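+
+/* For 2D-tiled surfaces the checker derives the pitch alignment from the
+ * macro-tile width (8 * bankw * npipes * mtilea pixels), the height
+ * alignment from the macro-tile height (8 * bankh * nbanks / mtilea
+ * rows), and the base alignment from one macro tile's worth of tile
+ * bytes.  A worked example with purely illustrative hardware parameters;
+ * the real group size, pipe and bank counts come from the tracker.
+ *
+ *    #include <stdio.h>
+ *
+ *    int main(void)
+ *    {
+ *    	unsigned npipes = 4, nbanks = 8;
+ *    	unsigned bankw = 1, bankh = 2, mtilea = 2;
+ *    	unsigned bpe = 4, nsamples = 1, tsplit = 1024;
+ *
+ *    	unsigned tileb = 64 * bpe * nsamples;            // 256 bytes per tile
+ *    	unsigned slice_pt = (tileb > tsplit) ? tileb / tsplit : 1;
+ *    	tileb /= slice_pt;
+ *
+ *    	unsigned palign = 8 * bankw * npipes * mtilea;   // 64-pixel pitch align
+ *    	unsigned halign = 8 * bankh * nbanks / mtilea;   // 64-row height align
+ *    	unsigned long base_align = (unsigned long)(palign / 8) * (halign / 8) * tileb;
+ *
+ *    	printf("palign %u halign %u base_align %lu bytes\n",
+ *    	       palign, halign, base_align);               // 64 64 16384
+ *    	return 0;
+ *    }
+ */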
+
+static int evergreen_surface_check(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ /* some common value computed here */
+ surf->bpe = r600_fmt_get_blocksize(surf->format);
+
+ switch (surf->mode) {
+ case ARRAY_LINEAR_GENERAL:
+ return evergreen_surface_check_linear(p, surf, prefix);
+ case ARRAY_LINEAR_ALIGNED:
+ return evergreen_surface_check_linear_aligned(p, surf, prefix);
+ case ARRAY_1D_TILED_THIN1:
+ return evergreen_surface_check_1d(p, surf, prefix);
+ case ARRAY_2D_TILED_THIN1:
+ return evergreen_surface_check_2d(p, surf, prefix);
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+ __func__, __LINE__, prefix, surf->mode);
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ switch (surf->mode) {
+ case ARRAY_2D_TILED_THIN1:
+ break;
+ case ARRAY_LINEAR_GENERAL:
+ case ARRAY_LINEAR_ALIGNED:
+ case ARRAY_1D_TILED_THIN1:
+ return 0;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+ __func__, __LINE__, prefix, surf->mode);
+ return -EINVAL;
+ }
+
+ switch (surf->nbanks) {
+ case 0: surf->nbanks = 2; break;
+ case 1: surf->nbanks = 4; break;
+ case 2: surf->nbanks = 8; break;
+ case 3: surf->nbanks = 16; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
+ __func__, __LINE__, prefix, surf->nbanks);
+ return -EINVAL;
+ }
+ switch (surf->bankw) {
+ case 0: surf->bankw = 1; break;
+ case 1: surf->bankw = 2; break;
+ case 2: surf->bankw = 4; break;
+ case 3: surf->bankw = 8; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
+ __func__, __LINE__, prefix, surf->bankw);
+ return -EINVAL;
+ }
+ switch (surf->bankh) {
+ case 0: surf->bankh = 1; break;
+ case 1: surf->bankh = 2; break;
+ case 2: surf->bankh = 4; break;
+ case 3: surf->bankh = 8; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
+ __func__, __LINE__, prefix, surf->bankh);
+ return -EINVAL;
+ }
+ switch (surf->mtilea) {
+ case 0: surf->mtilea = 1; break;
+ case 1: surf->mtilea = 2; break;
+ case 2: surf->mtilea = 4; break;
+ case 3: surf->mtilea = 8; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
+ __func__, __LINE__, prefix, surf->mtilea);
+ return -EINVAL;
+ }
+ switch (surf->tsplit) {
+ case 0: surf->tsplit = 64; break;
+ case 1: surf->tsplit = 128; break;
+ case 2: surf->tsplit = 256; break;
+ case 3: surf->tsplit = 512; break;
+ case 4: surf->tsplit = 1024; break;
+ case 5: surf->tsplit = 2048; break;
+ case 6: surf->tsplit = 4096; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
+ __func__, __LINE__, prefix, surf->tsplit);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
+{
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+ unsigned long offset;
+ int r;
+
+ mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+ pitch = track->cb_color_pitch[id];
+ slice = track->cb_color_slice[id];
+ surf.nbx = (pitch + 1) * 8;
+ surf.nby = ((slice + 1) * 64) / surf.nbx;
+ surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
+ surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
+ surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
+ surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
+ surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
+ surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
+ surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
+ surf.nsamples = 1;
+
+ if (!r600_fmt_is_valid_color(surf.format)) {
+ dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
+ __func__, __LINE__, surf.format,
+ id, track->cb_color_info[id]);
+ return -EINVAL;
+ }
+
+ r = evergreen_surface_value_conv_check(p, &surf, "cb");
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_surface_check(p, &surf, "cb");
+ if (r) {
+ dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, id, track->cb_color_pitch[id],
+ track->cb_color_slice[id], track->cb_color_attrib[id],
+ track->cb_color_info[id]);
+ return r;
+ }
+
+ offset = track->cb_color_bo_offset[id] << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, id, offset, surf.base_align);
+ return -EINVAL;
+ }
+
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+ dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+ "offset %d, max layer %d, bo size %ld, slice %d)\n",
+ __func__, __LINE__, id, surf.layer_size,
+ track->cb_color_bo_offset[id] << 8, mslice,
+ radeon_bo_size(track->cb_color_bo[id]), slice);
+ dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+ __func__, __LINE__, surf.nbx, surf.nby,
+ surf.mode, surf.bpe, surf.nsamples,
+ surf.bankw, surf.bankh,
+ surf.tsplit, surf.mtilea);
+ return -EINVAL;
+ }
+
+ return 0;
+}
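+
+/* evergreen_cs_track_validate_cb() reconstructs the colour-buffer
+ * geometry from the programmed registers: PITCH and SLICE are "tile max"
+ * fields, so nbx = (pitch + 1) * 8 pixels and nby = ((slice + 1) * 64) /
+ * nbx rows, and the buffer object must hold mslice layers past the byte
+ * offset.  A worked example with illustrative register values and a
+ * linear layer size; the tiled layer-size and alignment cases are
+ * handled by the surface checks above.
+ *
+ *    #include <stdio.h>
+ *
+ *    int main(void)
+ *    {
+ *    	unsigned pitch = 255, slice = 65535, mslice = 1, bpe = 4;
+ *    	unsigned long long bo_offset = 0, bo_size = 32ull << 20;    // 32 MiB BO
+ *
+ *    	unsigned nbx = (pitch + 1) * 8;                             // 2048 pixels
+ *    	unsigned nby = ((slice + 1) * 64) / nbx;                    // 2048 rows
+ *    	unsigned long long layer_size = (unsigned long long)nbx * nby * bpe; // 16 MiB
+ *
+ *    	unsigned long long end = bo_offset + layer_size * mslice;
+ *    	printf("%ux%u, layer %llu bytes, %s\n", nbx, nby, layer_size,
+ *    	       end <= bo_size ? "fits" : "bo too small");
+ *    	return 0;
+ *    }
+ */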
+
+static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+ unsigned long offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+ pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+ slice = track->db_depth_slice;
+ surf.nbx = (pitch + 1) * 8;
+ surf.nby = ((slice + 1) * 64) / surf.nbx;
+ surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+ surf.format = G_028044_FORMAT(track->db_s_info);
+ surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
+ surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+ surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+ surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+ surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+ surf.nsamples = 1;
+
+ if (surf.format != 1) {
+ dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
+ __func__, __LINE__, surf.format);
+ return -EINVAL;
+ }
+ /* replace by color format so we can use same code */
+ surf.format = V_028C70_COLOR_8;
+
+ r = evergreen_surface_value_conv_check(p, &surf, "stencil");
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_surface_check(p, &surf, NULL);
+ if (r) {
+ /* old userspace doesn't compute proper depth/stencil alignment;
+ * check that alignment against a bigger bytes-per-element value
+ * and only report an error if that alignment is wrong too.
+ */
+ surf.format = V_028C70_COLOR_8_8_8_8;
+ r = evergreen_surface_check(p, &surf, "stencil");
+ if (r) {
+ dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_s_info, track->db_z_info);
+ }
+ return r;
+ }
+
+ offset = track->db_s_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_read_bo)) {
+ dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_s_read_offset << 8, mslice,
+ radeon_bo_size(track->db_s_read_bo));
+ dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_s_info, track->db_z_info);
+ return -EINVAL;
+ }
+
+ offset = track->db_s_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_write_bo)) {
+ dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_s_write_offset << 8, mslice,
+ radeon_bo_size(track->db_s_write_bo));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
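
Note (not part of the patch): all of these validators use the same power-of-two alignment test, offset & (base_align - 1), on the byte offset derived from the *_BASE register (value << 8). A minimal sketch of that pattern with made-up values:

#include <stdio.h>

static int is_aligned(unsigned long offset, unsigned long align)
{
	/* valid only when align is a power of two */
	return (offset & (align - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_aligned(0x10000, 4096)); /* 1: aligned     */
	printf("%d\n", is_aligned(0x10100, 4096)); /* 0: misaligned  */
	return 0;
}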
+static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+ unsigned long offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+ pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+ slice = track->db_depth_slice;
+ surf.nbx = (pitch + 1) * 8;
+ surf.nby = ((slice + 1) * 64) / surf.nbx;
+ surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+ surf.format = G_028040_FORMAT(track->db_z_info);
+ surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
+ surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+ surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+ surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+ surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+ surf.nsamples = 1;
+
+ switch (surf.format) {
+ case V_028040_Z_16:
+ surf.format = V_028C70_COLOR_16;
+ break;
+ case V_028040_Z_24:
+ case V_028040_Z_32_FLOAT:
+ surf.format = V_028C70_COLOR_8_8_8_8;
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d depth invalid format %d\n",
+ __func__, __LINE__, surf.format);
+ return -EINVAL;
+ }
+
+ r = evergreen_surface_value_conv_check(p, &surf, "depth");
+ if (r) {
+ dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_z_info);
+ return r;
+ }
+
+ r = evergreen_surface_check(p, &surf, "depth");
+ if (r) {
+ dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_z_info);
+ return r;
+ }
+
+ offset = track->db_z_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_z_read_bo)) {
+ dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_z_read_offset << 8, mslice,
+ radeon_bo_size(track->db_z_read_bo));
+ return -EINVAL;
+ }
+
+ offset = track->db_z_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_z_write_bo)) {
+ dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_z_write_offset << 8, mslice,
+ radeon_bo_size(track->db_z_write_bo));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
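
Note (not part of the patch): depth formats are remapped onto color formats of the same element size so the shared surface checker can be reused; Z_16 is checked as a 16-bit color and Z_24/Z_32_FLOAT as a 4-byte color. A hedged sketch of that idea, with illustrative names rather than the driver's:

#include <stdio.h>

/* Illustrative mapping only, mirroring the V_028040_* -> V_028C70_* substitution above. */
enum sketch_z_fmt { SKETCH_Z_16, SKETCH_Z_24, SKETCH_Z_32_FLOAT };

static int sketch_z_bytes_per_element(enum sketch_z_fmt f)
{
	switch (f) {
	case SKETCH_Z_16:
		return 2;  /* checked as if it were COLOR_16 */
	case SKETCH_Z_24:
	case SKETCH_Z_32_FLOAT:
		return 4;  /* checked as if it were COLOR_8_8_8_8 */
	default:
		return -1; /* invalid depth format */
	}
}

int main(void)
{
	printf("Z_24 is checked with %d bytes per element\n",
	       sketch_z_bytes_per_element(SKETCH_Z_24));
	return 0;
}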
+static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap,
+ unsigned idx)
+{
+ struct eg_surface surf;
+ unsigned long toffset, moffset;
+ unsigned dim, llevel, mslice, width, height, depth, i;
+ u32 texdw[8];
+ int r;
+
+ texdw[0] = radeon_get_ib_value(p, idx + 0);
+ texdw[1] = radeon_get_ib_value(p, idx + 1);
+ texdw[2] = radeon_get_ib_value(p, idx + 2);
+ texdw[3] = radeon_get_ib_value(p, idx + 3);
+ texdw[4] = radeon_get_ib_value(p, idx + 4);
+ texdw[5] = radeon_get_ib_value(p, idx + 5);
+ texdw[6] = radeon_get_ib_value(p, idx + 6);
+ texdw[7] = radeon_get_ib_value(p, idx + 7);
+ dim = G_030000_DIM(texdw[0]);
+ llevel = G_030014_LAST_LEVEL(texdw[5]);
+ mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
+ width = G_030000_TEX_WIDTH(texdw[0]) + 1;
+ height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
+ depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
+ surf.format = G_03001C_DATA_FORMAT(texdw[7]);
+ surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
+ surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
+ surf.nby = r600_fmt_get_nblocksy(surf.format, height);
+ surf.mode = G_030004_ARRAY_MODE(texdw[1]);
+ surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
+ surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
+ surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
+ surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
+ surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
+ surf.nsamples = 1;
+ toffset = texdw[2] << 8;
+ moffset = texdw[3] << 8;
+
+ if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
+ dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, surf.format);
+ return -EINVAL;
+ }
+ switch (dim) {
+ case V_030000_SQ_TEX_DIM_1D:
+ case V_030000_SQ_TEX_DIM_2D:
+ case V_030000_SQ_TEX_DIM_CUBEMAP:
+ case V_030000_SQ_TEX_DIM_1D_ARRAY:
+ case V_030000_SQ_TEX_DIM_2D_ARRAY:
+ depth = 1;
+ break;
+ case V_030000_SQ_TEX_DIM_3D:
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
+ __func__, __LINE__, dim);
+ return -EINVAL;
+ }
+
+ r = evergreen_surface_value_conv_check(p, &surf, "texture");
+ if (r) {
+ return r;
+ }
+
+ /* align height */
+ evergreen_surface_check(p, &surf, NULL);
+ surf.nby = ALIGN(surf.nby, surf.halign);
+
+ r = evergreen_surface_check(p, &surf, "texture");
+ if (r) {
+ dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ __func__, __LINE__, texdw[0], texdw[1], texdw[4],
+ texdw[5], texdw[6], texdw[7]);
+ return r;
+ }
+
+ /* check texture size */
+ if (toffset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, toffset, surf.base_align);
+ return -EINVAL;
+ }
+ if (moffset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, moffset, surf.base_align);
+ return -EINVAL;
+ }
+ if (dim == SQ_TEX_DIM_3D) {
+ toffset += surf.layer_size * depth;
+ } else {
+ toffset += surf.layer_size * mslice;
+ }
+ if (toffset > radeon_bo_size(texture)) {
+ dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
+ "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)texdw[2] << 8, mslice,
+ depth, radeon_bo_size(texture),
+ surf.nbx, surf.nby);
+ return -EINVAL;
+ }
+
+ /* check mipmap size */
+ for (i = 1; i <= llevel; i++) {
+ unsigned w, h, d;
+
+ w = r600_mip_minify(width, i);
+ h = r600_mip_minify(height, i);
+ d = r600_mip_minify(depth, i);
+ surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
+ surf.nby = r600_fmt_get_nblocksy(surf.format, h);
+
+ switch (surf.mode) {
+ case ARRAY_2D_TILED_THIN1:
+ if (surf.nbx < surf.palign || surf.nby < surf.halign) {
+ surf.mode = ARRAY_1D_TILED_THIN1;
+ }
+ /* recompute alignment */
+ evergreen_surface_check(p, &surf, NULL);
+ break;
+ case ARRAY_LINEAR_GENERAL:
+ case ARRAY_LINEAR_ALIGNED:
+ case ARRAY_1D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid array mode %d\n",
+ __func__, __LINE__, surf.mode);
+ return -EINVAL;
+ }
+ surf.nbx = ALIGN(surf.nbx, surf.palign);
+ surf.nby = ALIGN(surf.nby, surf.halign);
+
+ r = evergreen_surface_check(p, &surf, "mipmap");
+ if (r) {
+ return r;
+ }
+
+ if (dim == SQ_TEX_DIM_3D) {
+ moffset += surf.layer_size * d;
+ } else {
+ moffset += surf.layer_size * mslice;
+ }
+ if (moffset > radeon_bo_size(mipmap)) {
+ dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
+ "offset %ld, coffset %ld, max layer %d, depth %d, "
+ "bo size %ld) level0 (%d %d %d)\n",
+ __func__, __LINE__, i, surf.layer_size,
+ (unsigned long)texdw[3] << 8, moffset, mslice,
+ d, radeon_bo_size(mipmap),
+ width, height, depth);
+ dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+ __func__, __LINE__, surf.nbx, surf.nby,
+ surf.mode, surf.bpe, surf.nsamples,
+ surf.bankw, surf.bankh,
+ surf.tsplit, surf.mtilea);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
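
Note (not part of the patch): the mipmap loop above walks levels 1..last_level, re-deriving per-level block counts and demoting 2D tiling when a level gets too small. A simplified, self-contained sketch of the size walk, assuming the usual max(1, size >> level) minification; the real r600_mip_minify() may differ in detail:

#include <stdio.h>

static unsigned minify(unsigned size, unsigned level)
{
	unsigned v = size >> level;
	return v ? v : 1;
}

int main(void)
{
	unsigned width = 1024, height = 512, llevel = 10, bpe = 4; /* hypothetical */
	unsigned long total = 0;
	unsigned i;

	for (i = 1; i <= llevel; i++)
		total += (unsigned long)minify(width, i) * minify(height, i) * bpe;

	printf("mip chain (levels 1..%u) needs at least %lu bytes\n", llevel, total);
	return 0;
}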
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned tmp, i;
+ int r;
+ unsigned buffer_mask = 0;
+
+ /* check streamout */
+ if (track->streamout_dirty && track->vgt_strmout_config) {
+ for (i = 0; i < 4; i++) {
+ if (track->vgt_strmout_config & (1 << i)) {
+ buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (buffer_mask & (1 << i)) {
+ if (track->vgt_strmout_bo[i]) {
+ u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+ (u64)track->vgt_strmout_size[i];
+ if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+ DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
+ return -EINVAL;
+ }
+ } else {
+ dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ return -EINVAL;
+ }
+ }
+ }
+ track->streamout_dirty = false;
+ }
+
+ if (track->sx_misc_kill_all_prims)
+ return 0;
+
+ /* check that we have a cb for each enabled target
+ */
+ if (track->cb_dirty) {
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* check cb */
+ r = evergreen_cs_track_validate_cb(p, i);
+ if (r) {
+ return r;
+ }
+ }
+ }
+ track->cb_dirty = false;
+ }
+
+ if (track->db_dirty) {
+ /* Check stencil buffer */
+ if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+ r = evergreen_cs_track_validate_stencil(p);
+ if (r)
+ return r;
+ }
+ /* Check depth buffer */
+ if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) {
+ r = evergreen_cs_track_validate_depth(p);
+ if (r)
+ return r;
+ }
+ track->db_dirty = false;
+ }
+
return 0;
}
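
Note (not part of the patch): CB_TARGET_MASK packs a 4-bit component write mask per render target, which is why the loop above tests (tmp >> (i * 4)) & 0xF before requiring a bound color buffer. Illustrative sketch:

#include <stdio.h>

int main(void)
{
	unsigned cb_target_mask = 0x0000F00F; /* hypothetical: targets 0 and 3 enabled */
	unsigned i;

	for (i = 0; i < 8; i++)
		if ((cb_target_mask >> (i * 4)) & 0xF)
			printf("color target %u has at least one component enabled\n", i);
	return 0;
}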
@@ -503,6 +1189,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
break;
case CAYMAN_DB_EQAA:
if (p->rdev->family < CHIP_CAYMAN) {
@@ -532,20 +1219,35 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
- ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx] |= DB_TILE_SPLIT(tile_split) |
+ DB_BANK_WIDTH(bankw) |
+ DB_BANK_HEIGHT(bankh) |
+ DB_MACRO_TILE_ASPECT(mtaspect);
}
}
+ track->db_dirty = true;
break;
case DB_STENCIL_INFO:
track->db_s_info = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
break;
case DB_DEPTH_VIEW:
track->db_depth_view = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
break;
case DB_DEPTH_SIZE:
track->db_depth_size = radeon_get_ib_value(p, idx);
- track->db_depth_size_idx = idx;
+ track->db_dirty = true;
+ break;
+ case R_02805C_DB_DEPTH_SLICE:
+ track->db_depth_slice = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
break;
case DB_Z_READ_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -557,6 +1259,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_z_read_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_z_read_bo = reloc->robj;
+ track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -568,6 +1271,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_z_write_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_z_write_bo = reloc->robj;
+ track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -579,6 +1283,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_s_read_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_s_read_bo = reloc->robj;
+ track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -590,18 +1295,56 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_s_write_offset = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_s_write_bo = reloc->robj;
+ track->db_dirty = true;
break;
case VGT_STRMOUT_CONFIG:
track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
break;
case VGT_STRMOUT_BUFFER_CONFIG:
track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
break;
+ case VGT_STRMOUT_BUFFER_BASE_0:
+ case VGT_STRMOUT_BUFFER_BASE_1:
+ case VGT_STRMOUT_BUFFER_BASE_2:
+ case VGT_STRMOUT_BUFFER_BASE_3:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+ track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->vgt_strmout_bo[tmp] = reloc->robj;
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_SIZE_0:
+ case VGT_STRMOUT_BUFFER_SIZE_1:
+ case VGT_STRMOUT_BUFFER_SIZE_2:
+ case VGT_STRMOUT_BUFFER_SIZE_3:
+ tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+ /* size in register is DWs, convert to bytes */
+ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+ track->streamout_dirty = true;
+ break;
+ case CP_COHER_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
break;
case CB_SHADER_MASK:
track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
break;
case PA_SC_AA_CONFIG:
if (p->rdev->family >= CHIP_CAYMAN) {
@@ -631,6 +1374,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_VIEW:
tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
break;
case CB_COLOR8_VIEW:
case CB_COLOR9_VIEW:
@@ -638,6 +1382,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_VIEW:
tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
break;
case CB_COLOR0_INFO:
case CB_COLOR1_INFO:
@@ -659,6 +1404,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
+ track->cb_dirty = true;
break;
case CB_COLOR8_INFO:
case CB_COLOR9_INFO:
@@ -676,6 +1422,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
+ track->cb_dirty = true;
break;
case CB_COLOR0_PITCH:
case CB_COLOR1_PITCH:
@@ -687,7 +1434,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_PITCH:
tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
- track->cb_color_pitch_idx[tmp] = idx;
+ track->cb_dirty = true;
break;
case CB_COLOR8_PITCH:
case CB_COLOR9_PITCH:
@@ -695,7 +1442,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_PITCH:
tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
- track->cb_color_pitch_idx[tmp] = idx;
+ track->cb_dirty = true;
break;
case CB_COLOR0_SLICE:
case CB_COLOR1_SLICE:
@@ -707,7 +1454,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_SLICE:
tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
- track->cb_color_slice_idx[tmp] = idx;
+ track->cb_dirty = true;
break;
case CB_COLOR8_SLICE:
case CB_COLOR9_SLICE:
@@ -715,7 +1462,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_SLICE:
tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
- track->cb_color_slice_idx[tmp] = idx;
+ track->cb_dirty = true;
break;
case CB_COLOR0_ATTRIB:
case CB_COLOR1_ATTRIB:
@@ -725,6 +1472,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(tile_split) |
+ CB_BANK_WIDTH(bankw) |
+ CB_BANK_HEIGHT(bankh) |
+ CB_MACRO_TILE_ASPECT(mtaspect);
+ }
+ }
+ tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
+ track->cb_color_attrib[tmp] = ib[idx];
+ track->cb_dirty = true;
+ break;
case CB_COLOR8_ATTRIB:
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
@@ -735,30 +1506,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
- ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(tile_split) |
+ CB_BANK_WIDTH(bankw) |
+ CB_BANK_HEIGHT(bankh) |
+ CB_MACRO_TILE_ASPECT(mtaspect);
+ }
}
- break;
- case CB_COLOR0_DIM:
- case CB_COLOR1_DIM:
- case CB_COLOR2_DIM:
- case CB_COLOR3_DIM:
- case CB_COLOR4_DIM:
- case CB_COLOR5_DIM:
- case CB_COLOR6_DIM:
- case CB_COLOR7_DIM:
- tmp = (reg - CB_COLOR0_DIM) / 0x3c;
- track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
- track->cb_color_dim_idx[tmp] = idx;
- break;
- case CB_COLOR8_DIM:
- case CB_COLOR9_DIM:
- case CB_COLOR10_DIM:
- case CB_COLOR11_DIM:
- tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
- track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
- track->cb_color_dim_idx[tmp] = idx;
+ tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
+ track->cb_color_attrib[tmp] = ib[idx];
+ track->cb_dirty = true;
break;
case CB_COLOR0_FMASK:
case CB_COLOR1_FMASK:
@@ -833,8 +1597,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_dirty = true;
break;
case CB_COLOR8_BASE:
case CB_COLOR9_BASE:
@@ -849,8 +1613,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_dirty = true;
break;
case CB_IMMED0_BASE:
case CB_IMMED1_BASE:
@@ -989,6 +1753,9 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
+ case SX_MISC:
+ track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+ break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
@@ -996,22 +1763,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return 0;
}
-/**
- * evergreen_check_texture_resource() - check if register is authorized or not
- * @p: parser structure holding parsing context
- * @idx: index into the cs buffer
- * @texture: texture's bo structure
- * @mipmap: mipmap's bo structure
- *
- * This function will check that the resource has valid field and that
- * the texture and mipmap bo object are big enough to cover this resource.
- */
-static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
- struct radeon_bo *texture,
- struct radeon_bo *mipmap)
+static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
- /* XXX fill in */
- return 0;
+ u32 last_reg, m, i;
+
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return true;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return true;
+ }
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
@@ -1036,6 +1811,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
int pred_op;
int tmp;
+ uint64_t offset;
+
if (pkt->count != 1) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1059,8 +1836,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ offset = reloc->lobj.gpu_offset +
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
+
+ ib[idx + 0] = offset;
+ ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
}
break;
case PACKET3_CONTEXT_CONTROL:
@@ -1088,6 +1869,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_INDEX_BASE:
+ {
+ uint64_t offset;
+
if (pkt->count != 1) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
@@ -1097,15 +1881,24 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
+ }
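
Note (not part of the patch): several packets in this parser split a GPU virtual address across two dwords, the low 32 bits plus the top 8 bits (39:32); the rewrites above add the relocation offset to the recombined 40-bit value and then split it again. Minimal sketch of that recombination with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lo = 0x00100000, hi_dw = 0x00000012;  /* hypothetical IB dwords */
	uint64_t gpu_offset = 0x0000002300000000ULL;   /* hypothetical reloc base */

	uint64_t addr = gpu_offset + lo + ((uint64_t)(hi_dw & 0xff) << 32);

	uint32_t new_lo = (uint32_t)addr;
	uint32_t new_hi = (uint32_t)(addr >> 32) & 0xff; /* upper_32_bits & 0xff */

	printf("addr = 0x%llx -> lo 0x%08x hi 0x%02x\n",
	       (unsigned long long)addr, (unsigned)new_lo, (unsigned)new_hi);
	return 0;
}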
case PACKET3_DRAW_INDEX:
+ {
+ uint64_t offset;
if (pkt->count != 3) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1115,15 +1908,25 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
+ }
case PACKET3_DRAW_INDEX_2:
+ {
+ uint64_t offset;
+
if (pkt->count != 4) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
@@ -1133,14 +1936,21 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ radeon_get_ib_value(p, idx+1) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
+ }
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
@@ -1231,13 +2041,20 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* bit 4 is reg (0) or mem (1) */
if (idx_value & 0x10) {
+ uint64_t offset;
+
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
case PACKET3_SURFACE_SYNC:
@@ -1262,16 +2079,25 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (pkt->count) {
+ uint64_t offset;
+
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffff8;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
case PACKET3_EVENT_WRITE_EOP:
+ {
+ uint64_t offset;
+
if (pkt->count != 4) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -1281,10 +2107,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffffc;
+ ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
break;
+ }
case PACKET3_EVENT_WRITE_EOS:
+ {
+ uint64_t offset;
+
if (pkt->count != 3) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
@@ -1294,9 +2129,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffffc;
+ ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
break;
+ }
case PACKET3_SET_CONFIG_REG:
start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
end_reg = 4 * pkt->count + start_reg - 4;
@@ -1344,6 +2185,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < (pkt->count / 8); i++) {
struct radeon_bo *texture, *mipmap;
+ u32 toffset, moffset;
u32 size, offset;
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
@@ -1354,32 +2196,42 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
- ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
ib[idx+1+(i*8)+1] |=
TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx+1+(i*8)+6] |=
- TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
ib[idx+1+(i*8)+7] |=
+ TEX_BANK_WIDTH(bankw) |
+ TEX_BANK_HEIGHT(bankh) |
+ MACRO_TILE_ASPECT(mtaspect) |
TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
}
}
texture = reloc->robj;
+ toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
/* tex mip base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
- ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
- r = evergreen_check_texture_resource(p, idx+1+(i*8),
- texture, mipmap);
+ r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
if (r)
return r;
+ ib[idx+1+(i*8)+2] += toffset;
+ ib[idx+1+(i*8)+3] += moffset;
break;
case SQ_TEX_VTX_VALID_BUFFER:
+ {
+ uint64_t offset64;
/* vtx base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -1391,11 +2243,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
dev_warn(p->dev, "vbo resource seems too big for the bo\n");
- ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
- ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
- ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset64 = reloc->lobj.gpu_offset + offset;
+ ib[idx+1+(i*8)+0] = offset64;
+ ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
+ (upper_32_bits(offset64) & 0xff);
break;
+ }
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
@@ -1451,6 +2307,104 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ return -EINVAL;
+ }
+ /* Updating memory at DST_ADDRESS. */
+ if (idx_value & 0x1) {
+ u64 offset;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ }
+ /* Reading data from SRC_ADDRESS. */
+ if (((idx_value >> 1) & 0x3) == 2) {
+ u64 offset;
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad COPY_DW (invalid count)\n");
+ return -EINVAL;
+ }
+ if (idx_value & 0x1) {
+ u64 offset;
+ /* SRC is memory. */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* SRC is a reg. */
+ reg = radeon_get_ib_value(p, idx+1) << 2;
+ if (!evergreen_is_safe_reg(p, reg, idx+1))
+ return -EINVAL;
+ }
+ if (idx_value & 0x2) {
+ u64 offset;
+ /* DST is memory. */
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* DST is a reg. */
+ reg = radeon_get_ib_value(p, idx+3) << 2;
+ if (!evergreen_is_safe_reg(p, reg, idx+3))
+ return -EINVAL;
+ }
+ break;
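
Note (not part of the patch): both STRMOUT_BUFFER_UPDATE and COPY_DW validate a single dword access; the 64-bit offset is rebuilt from the IB and checked so that offset + 4 still fits inside the backing BO before the relocation is applied. Sketch of that check, values made up:

#include <stdio.h>
#include <stdint.h>

static int dword_fits(uint64_t offset, unsigned long bo_size)
{
	return (offset + 4) <= bo_size;
}

int main(void)
{
	printf("%d\n", dword_fits(0xffc, 0x1000)); /* 1: last dword of a 4 KiB bo */
	printf("%d\n", dword_fits(0xffd, 0x1000)); /* 0: would spill past the end */
	return 0;
}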
case PACKET3_NOP:
break;
default:
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 4215de9..96c10b3 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -219,6 +219,7 @@
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
+# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 74713d4..eb5708c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -77,6 +77,7 @@
#define CONFIG_MEMSIZE 0x5428
+#define CP_COHER_BASE 0x85F8
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -925,7 +926,70 @@
#define DB_DEBUG4 0x983C
#define DB_WATERMARKS 0x9854
#define DB_DEPTH_CONTROL 0x28800
+#define R_028800_DB_DEPTH_CONTROL 0x028800
+#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
+#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
+#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
+#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
+#define C_028800_Z_ENABLE 0xFFFFFFFD
+#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
+#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
+#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
+#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
+#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
+#define C_028800_ZFUNC 0xFFFFFF8F
+#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
+#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
+#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
+#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
+#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
+#define C_028800_STENCILFUNC 0xFFFFF8FF
+#define V_028800_STENCILFUNC_NEVER 0x00000000
+#define V_028800_STENCILFUNC_LESS 0x00000001
+#define V_028800_STENCILFUNC_EQUAL 0x00000002
+#define V_028800_STENCILFUNC_LEQUAL 0x00000003
+#define V_028800_STENCILFUNC_GREATER 0x00000004
+#define V_028800_STENCILFUNC_NOTEQUAL 0x00000005
+#define V_028800_STENCILFUNC_GEQUAL 0x00000006
+#define V_028800_STENCILFUNC_ALWAYS 0x00000007
+#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
+#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
+#define C_028800_STENCILFAIL 0xFFFFC7FF
+#define V_028800_STENCIL_KEEP 0x00000000
+#define V_028800_STENCIL_ZERO 0x00000001
+#define V_028800_STENCIL_REPLACE 0x00000002
+#define V_028800_STENCIL_INCR 0x00000003
+#define V_028800_STENCIL_DECR 0x00000004
+#define V_028800_STENCIL_INVERT 0x00000005
+#define V_028800_STENCIL_INCR_WRAP 0x00000006
+#define V_028800_STENCIL_DECR_WRAP 0x00000007
+#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
+#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
+#define C_028800_STENCILZPASS 0xFFFE3FFF
+#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
+#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
+#define C_028800_STENCILZFAIL 0xFFF1FFFF
+#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
+#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
+#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
+#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
+#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
+#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
+#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
+#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
+#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
+#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
+#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
+#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
#define DB_DEPTH_VIEW 0x28008
+#define R_028008_DB_DEPTH_VIEW 0x00028008
+#define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028008_SLICE_START 0xFFFFF800
+#define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028008_SLICE_MAX 0xFF001FFF
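
Note (not part of the patch): these follow the usual r600-style register helper naming, where S_<reg>_<field>() shifts a value into the field, G_<reg>_<field>() extracts it, and C_<reg>_<field> is the AND mask that clears it. Illustrative usage, assuming evergreend.h and the kernel's u32 type are available:

/* Sketch only: how the generated helpers compose. */
static inline u32 sketch_set_slice_max(u32 db_depth_view, unsigned slice_max)
{
	/* clear the old SLICE_MAX field, then pack the new value */
	return (db_depth_view & C_028008_SLICE_MAX) | S_028008_SLICE_MAX(slice_max);
}

static inline unsigned sketch_get_slice_max(u32 db_depth_view)
{
	return G_028008_SLICE_MAX(db_depth_view);
}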
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
@@ -933,12 +997,59 @@
# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
+# define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
+#define R_028040_DB_Z_INFO 0x028040
+#define S_028040_FORMAT(x) (((x) & 0x3) << 0)
+#define G_028040_FORMAT(x) (((x) >> 0) & 0x3)
+#define C_028040_FORMAT 0xFFFFFFFC
+#define V_028040_Z_INVALID 0x00000000
+#define V_028040_Z_16 0x00000001
+#define V_028040_Z_24 0x00000002
+#define V_028040_Z_32_FLOAT 0x00000003
+#define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4)
+#define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF)
+#define C_028040_ARRAY_MODE 0xFFFFFF0F
+#define S_028040_READ_SIZE(x) (((x) & 0x1) << 28)
+#define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1)
+#define C_028040_READ_SIZE 0xEFFFFFFF
+#define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29)
+#define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1)
+#define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF
+#define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
+#define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
+#define C_028040_ZRANGE_PRECISION 0x7FFFFFFF
+#define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8)
+#define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7)
+#define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12)
+#define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3)
+#define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16)
+#define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3)
+#define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20)
+#define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3)
+#define S_028040_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
+#define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3)
#define DB_STENCIL_INFO 0x28044
+#define R_028044_DB_STENCIL_INFO 0x028044
+#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
+#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
+#define C_028044_FORMAT 0xFFFFFFFE
+#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
#define DB_Z_WRITE_BASE 0x28050
#define DB_STENCIL_WRITE_BASE 0x28054
#define DB_DEPTH_SIZE 0x28058
+#define R_028058_DB_DEPTH_SIZE 0x028058
+#define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0)
+#define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF)
+#define C_028058_PITCH_TILE_MAX 0xFFFFF800
+#define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11)
+#define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF)
+#define C_028058_HEIGHT_TILE_MAX 0xFFC007FF
+#define R_02805C_DB_DEPTH_SLICE 0x02805C
+#define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0)
+#define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF)
+#define C_02805C_SLICE_TILE_MAX 0xFFC00000
#define SQ_PGM_START_PS 0x28840
#define SQ_PGM_START_VS 0x2885c
@@ -948,6 +1059,14 @@
#define SQ_PGM_START_HS 0x288b8
#define SQ_PGM_START_LS 0x288d0
+#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
+#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
+#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
+#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
+#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
#define VGT_STRMOUT_CONFIG 0x28b94
#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
@@ -974,6 +1093,114 @@
#define CB_COLOR0_PITCH 0x28c64
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
+#define R_028C6C_CB_COLOR0_VIEW 0x00028C6C
+#define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028C6C_SLICE_START 0xFFFFF800
+#define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028C6C_SLICE_MAX 0xFF001FFF
+#define R_028C70_CB_COLOR0_INFO 0x028C70
+#define S_028C70_ENDIAN(x) (((x) & 0x3) << 0)
+#define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3)
+#define C_028C70_ENDIAN 0xFFFFFFFC
+#define S_028C70_FORMAT(x) (((x) & 0x3F) << 2)
+#define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F)
+#define C_028C70_FORMAT 0xFFFFFF03
+#define V_028C70_COLOR_INVALID 0x00000000
+#define V_028C70_COLOR_8 0x00000001
+#define V_028C70_COLOR_4_4 0x00000002
+#define V_028C70_COLOR_3_3_2 0x00000003
+#define V_028C70_COLOR_16 0x00000005
+#define V_028C70_COLOR_16_FLOAT 0x00000006
+#define V_028C70_COLOR_8_8 0x00000007
+#define V_028C70_COLOR_5_6_5 0x00000008
+#define V_028C70_COLOR_6_5_5 0x00000009
+#define V_028C70_COLOR_1_5_5_5 0x0000000A
+#define V_028C70_COLOR_4_4_4_4 0x0000000B
+#define V_028C70_COLOR_5_5_5_1 0x0000000C
+#define V_028C70_COLOR_32 0x0000000D
+#define V_028C70_COLOR_32_FLOAT 0x0000000E
+#define V_028C70_COLOR_16_16 0x0000000F
+#define V_028C70_COLOR_16_16_FLOAT 0x00000010
+#define V_028C70_COLOR_8_24 0x00000011
+#define V_028C70_COLOR_8_24_FLOAT 0x00000012
+#define V_028C70_COLOR_24_8 0x00000013
+#define V_028C70_COLOR_24_8_FLOAT 0x00000014
+#define V_028C70_COLOR_10_11_11 0x00000015
+#define V_028C70_COLOR_10_11_11_FLOAT 0x00000016
+#define V_028C70_COLOR_11_11_10 0x00000017
+#define V_028C70_COLOR_11_11_10_FLOAT 0x00000018
+#define V_028C70_COLOR_2_10_10_10 0x00000019
+#define V_028C70_COLOR_8_8_8_8 0x0000001A
+#define V_028C70_COLOR_10_10_10_2 0x0000001B
+#define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_028C70_COLOR_32_32 0x0000001D
+#define V_028C70_COLOR_32_32_FLOAT 0x0000001E
+#define V_028C70_COLOR_16_16_16_16 0x0000001F
+#define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_028C70_COLOR_32_32_32_32 0x00000022
+#define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023
+#define V_028C70_COLOR_32_32_32_FLOAT 0x00000030
+#define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8)
+#define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF)
+#define C_028C70_ARRAY_MODE 0xFFFFF0FF
+#define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12)
+#define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
+#define C_028C70_NUMBER_TYPE 0xFFFF8FFF
+#define V_028C70_NUMBER_UNORM 0x00000000
+#define V_028C70_NUMBER_SNORM 0x00000001
+#define V_028C70_NUMBER_USCALED 0x00000002
+#define V_028C70_NUMBER_SSCALED 0x00000003
+#define V_028C70_NUMBER_UINT 0x00000004
+#define V_028C70_NUMBER_SINT 0x00000005
+#define V_028C70_NUMBER_SRGB 0x00000006
+#define V_028C70_NUMBER_FLOAT 0x00000007
+#define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15)
+#define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3)
+#define C_028C70_COMP_SWAP 0xFFFE7FFF
+#define V_028C70_SWAP_STD 0x00000000
+#define V_028C70_SWAP_ALT 0x00000001
+#define V_028C70_SWAP_STD_REV 0x00000002
+#define V_028C70_SWAP_ALT_REV 0x00000003
+#define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17)
+#define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1)
+#define C_028C70_FAST_CLEAR 0xFFFDFFFF
+#define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18)
+#define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3)
+#define C_028C70_COMPRESSION 0xFFF3FFFF
+#define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19)
+#define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1)
+#define C_028C70_BLEND_CLAMP 0xFFF7FFFF
+#define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20)
+#define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1)
+#define C_028C70_BLEND_BYPASS 0xFFEFFFFF
+#define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21)
+#define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1)
+#define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF
+#define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22)
+#define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1)
+#define C_028C70_ROUND_MODE 0xFFBFFFFF
+#define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23)
+#define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1)
+#define C_028C70_TILE_COMPACT 0xFF7FFFFF
+#define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24)
+#define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3)
+#define C_028C70_SOURCE_FORMAT 0xFCFFFFFF
+#define V_028C70_EXPORT_4C_32BPC 0x0
+#define V_028C70_EXPORT_4C_16BPC 0x1
+#define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */
+#define S_028C70_RAT(x) (((x) & 0x1) << 26)
+#define G_028C70_RAT(x) (((x) >> 26) & 0x1)
+#define C_028C70_RAT 0xFBFFFFFF
+#define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27)
+#define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7)
+#define C_028C70_RESOURCE_TYPE 0xC7FFFFFF
+
#define CB_COLOR0_INFO 0x28c70
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
@@ -984,6 +1211,20 @@
# define CB_SOURCE_FORMAT(x) ((x) << 24)
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
+#define R_028C74_CB_COLOR0_ATTRIB 0x028C74
+#define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4)
+#define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1)
+#define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF
+#define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5)
+#define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf)
+#define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10)
+#define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3)
+#define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13)
+#define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3)
+#define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+#define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3)
+#define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
+#define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3)
#define CB_COLOR0_ATTRIB 0x28c74
# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
# define ADDR_SURF_TILE_SPLIT_64B 0
@@ -1008,6 +1249,7 @@
# define ADDR_SURF_BANK_HEIGHT_2 1
# define ADDR_SURF_BANK_HEIGHT_4 2
# define ADDR_SURF_BANK_HEIGHT_8 3
+# define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1196,9 +1438,144 @@
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
+#define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000
+#define S_030000_DIM(x) (((x) & 0x7) << 0)
+#define G_030000_DIM(x) (((x) >> 0) & 0x7)
+#define C_030000_DIM 0xFFFFFFF8
+#define V_030000_SQ_TEX_DIM_1D 0x00000000
+#define V_030000_SQ_TEX_DIM_2D 0x00000001
+#define V_030000_SQ_TEX_DIM_3D 0x00000002
+#define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003
+#define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004
+#define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005
+#define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006
+#define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
+#define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5)
+#define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1)
+#define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF
+#define S_030000_PITCH(x) (((x) & 0xFFF) << 6)
+#define G_030000_PITCH(x) (((x) >> 6) & 0xFFF)
+#define C_030000_PITCH 0xFFFC003F
+#define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18)
+#define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF)
+#define C_030000_TEX_WIDTH 0x0003FFFF
+#define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004
+#define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0)
+#define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF)
+#define C_030004_TEX_HEIGHT 0xFFFFC000
+#define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14)
+#define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF)
+#define C_030004_TEX_DEPTH 0xF8003FFF
+#define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28)
+#define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF)
+#define C_030004_ARRAY_MODE 0x0FFFFFFF
+#define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008
+#define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_030008_BASE_ADDRESS 0x00000000
+#define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C
+#define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_03000C_MIP_ADDRESS 0x00000000
+#define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010
+#define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
+#define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
+#define C_030010_FORMAT_COMP_X 0xFFFFFFFC
+#define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000
+#define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001
+#define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002
+#define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
+#define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
+#define C_030010_FORMAT_COMP_Y 0xFFFFFFF3
+#define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
+#define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
+#define C_030010_FORMAT_COMP_Z 0xFFFFFFCF
+#define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
+#define G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
+#define C_030010_FORMAT_COMP_W 0xFFFFFF3F
+#define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
+#define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
+#define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF
+#define V_030010_SQ_NUM_FORMAT_NORM 0x00000000
+#define V_030010_SQ_NUM_FORMAT_INT 0x00000001
+#define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002
+#define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
+#define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
+#define C_030010_SRF_MODE_ALL 0xFFFFFBFF
+#define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000
+#define V_030010_SRF_MODE_NO_ZERO 0x00000001
+#define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
+#define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
+#define C_030010_FORCE_DEGAMMA 0xFFFFF7FF
+#define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
+#define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
+#define C_030010_ENDIAN_SWAP 0xFFFFCFFF
+#define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16)
+#define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7)
+#define C_030010_DST_SEL_X 0xFFF8FFFF
+#define V_030010_SQ_SEL_X 0x00000000
+#define V_030010_SQ_SEL_Y 0x00000001
+#define V_030010_SQ_SEL_Z 0x00000002
+#define V_030010_SQ_SEL_W 0x00000003
+#define V_030010_SQ_SEL_0 0x00000004
+#define V_030010_SQ_SEL_1 0x00000005
+#define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19)
+#define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
+#define C_030010_DST_SEL_Y 0xFFC7FFFF
+#define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22)
+#define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
+#define C_030010_DST_SEL_Z 0xFE3FFFFF
+#define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25)
+#define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7)
+#define C_030010_DST_SEL_W 0xF1FFFFFF
+#define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28)
+#define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
+#define C_030010_BASE_LEVEL 0x0FFFFFFF
+#define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014
+#define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0)
+#define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
+#define C_030014_LAST_LEVEL 0xFFFFFFF0
+#define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
+#define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
+#define C_030014_BASE_ARRAY 0xFFFE000F
+#define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
+#define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
+#define C_030014_LAST_ARRAY 0xC001FFFF
+#define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018
+#define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0)
+#define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7)
+#define C_030018_MAX_ANISO 0xFFFFFFF8
+#define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3)
+#define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7)
+#define C_030018_PERF_MODULATION 0xFFFFFFC7
+#define S_030018_INTERLACED(x) (((x) & 0x1) << 6)
+#define G_030018_INTERLACED(x) (((x) >> 6) & 0x1)
+#define C_030018_INTERLACED 0xFFFFFFBF
+#define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29)
+#define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7)
+#define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C
+#define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
+#define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3)
+#define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8)
+#define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3)
+#define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+#define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3)
+#define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16)
+#define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3)
+#define S_03001C_TYPE(x) (((x) & 0x3) << 30)
+#define G_03001C_TYPE(x) (((x) >> 30) & 0x3)
+#define C_03001C_TYPE 0x3FFFFFFF
+#define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000
+#define V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001
+#define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002
+#define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003
+#define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0)
+#define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F)
+#define C_03001C_DATA_FORMAT 0xFFFFFFC0
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
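
The S_/G_/C_ helpers added above follow the usual radeon register-header convention: S_ shifts a field value into its bit position, G_ extracts it from a packed word, and C_ is the clear mask for that field. A minimal sketch of the convention, using only macros defined in this hunk; 'word4' and the function name are illustrative, not names from this patch.

/* Build a hypothetical SQ_TEX_RESOURCE_WORD4 value with the S_/G_/C_ macros. */
static u32 example_build_tex_word4(void)
{
	u32 word4 = 0;

	/* S_ packs each field into its bit range */
	word4 |= S_030010_DST_SEL_X(V_030010_SQ_SEL_X);
	word4 |= S_030010_DST_SEL_Y(V_030010_SQ_SEL_Y);
	word4 |= S_030010_DST_SEL_Z(V_030010_SQ_SEL_Z);
	word4 |= S_030010_DST_SEL_W(V_030010_SQ_SEL_W);
	word4 |= S_030010_BASE_LEVEL(0);

	/* replace one field: mask it out with C_, then re-set it with S_ */
	word4 = (word4 & C_030010_BASE_LEVEL) | S_030010_BASE_LEVEL(2);

	/* G_ reads a field back out of the packed word */
	WARN_ON(G_030010_BASE_LEVEL(word4) != 2);

	return word4;
}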
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 2509c50..160799c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1318,7 +1318,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
/* this only tests cp0 */
- r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
@@ -1466,7 +1466,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = evergreen_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
+ rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -1518,7 +1518,7 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
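
After this change the ring and IB tests take the ring index alongside the ring pointer, and callers go through the generic radeon_ring_test()/radeon_ib_test() wrappers instead of the r600-specific helpers. A minimal sketch of the calling convention as it appears in the startup paths above; error unwinding is trimmed and the function name is illustrative.

static int example_test_gfx_ring(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* exercise the ring itself, then an indirect buffer on that ring */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r)
		return r;

	return radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
}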
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 333cde9..81801c1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,40 @@ MODULE_FIRMWARE(FIRMWARE_R520);
#include "r100_track.h"
+void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ int i;
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+ break;
+ udelay(1);
+ }
+ }
+ } else {
+ if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+ break;
+ udelay(1);
+ }
+ }
+ }
+}
+
/* This file gathers functions specific to:
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
@@ -87,23 +121,27 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
+
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_DST_TILE_MACRO;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reg == RADEON_SRC_PITCH_OFFSET) {
+ DRM_ERROR("Cannot src blit from microtiled surface\n");
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ tile_flags |= RADEON_DST_TILE_MICRO;
}
- tile_flags |= RADEON_DST_TILE_MICRO;
- }
- tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ tmp |= tile_flags;
+ p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ } else
+ p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
@@ -412,7 +450,7 @@ void r100_pm_misc(struct radeon_device *rdev)
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
- rdev->asic->set_pcie_lanes &&
+ rdev->asic->pm.set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
@@ -592,8 +630,8 @@ int r100_pci_gart_init(struct radeon_device *rdev)
if (r)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -930,9 +968,8 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_ring_start(struct radeon_device *rdev)
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
r = radeon_ring_lock(rdev, ring, 2);
@@ -1143,8 +1180,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
- radeon_ring_start(rdev);
- r = radeon_ring_test(rdev, ring);
+ radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
@@ -1552,7 +1589,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_TXO_MICRO_TILE_X2;
+
+ tmp = idx_value & ~(0x7 << 2);
+ tmp |= tile_flags;
+ ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+ } else
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1623,15 +1670,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
-
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
-
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ } else
+ ib[idx] = idx_value;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
@@ -3691,7 +3740,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
-int r100_ib_test(struct radeon_device *rdev)
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib *ib;
uint32_t scratch;
@@ -3916,7 +3965,7 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
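
The new r100_wait_for_vblank() above polls in two phases: first it waits until any vblank already in progress ends, then it waits for the start of the next one, so callers always observe a fresh vblank edge rather than returning mid-vblank. A sketch of that polling pattern in isolation; 'status_reg' and 'vblank_bit' stand in for the per-CRTC register and bit and are not names from this patch.

static void example_wait_for_vblank_edge(struct radeon_device *rdev,
					 u32 status_reg, u32 vblank_bit)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!(RREG32(status_reg) & vblank_bit))
			break;		/* left the vblank currently in progress */
		udelay(1);
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(status_reg) & vblank_bit)
			break;		/* next vblank has started */
		udelay(1);
	}
}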
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eba4cbf..a59cc47 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -215,7 +215,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R200_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R200_TXO_MICRO_TILE;
+
+ tmp = idx_value & ~(0x7 << 2);
+ tmp |= tile_flags;
+ ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+ } else
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -277,14 +287,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ } else
+ ib[idx] = idx_value;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 6829638..fa14383 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -105,8 +105,8 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
if (r)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
@@ -206,9 +206,8 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
-void r300_ring_start(struct radeon_device *rdev)
+void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned gb_tile_config;
int r;
@@ -1419,7 +1418,7 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index b143230..f3fcaac 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -279,7 +279,7 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 3bd8f1b..ec576aa 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -351,6 +351,8 @@
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STATUS 0x609c
+# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
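
The AVIVO_D1CRTC_STATUS register and its V_BLANK bit added above give the AVIVO-class wait_for_vblank helpers in this series something to poll. A small sketch of the lookup for CRTC0 only, to show how the new define pairs with the bit; the helper name is illustrative.

static bool example_avivo_crtc0_in_vblank(struct radeon_device *rdev)
{
	/* bit 0 of D1CRTC_STATUS is set while CRTC0 is in vertical blank */
	return RREG32(AVIVO_D1CRTC_STATUS) & AVIVO_D1CRTC_V_BLANK;
}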
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 25084e8..ebcc15b 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -33,7 +33,7 @@
/* This file gathers functions specific to: r520,rv530,rv560,rv570,r580 */
-static int r520_mc_wait_for_idle(struct radeon_device *rdev)
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
@@ -207,7 +207,7 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 17ca72c..5eb2382 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2226,7 +2226,7 @@ int r600_cp_resume(struct radeon_device *rdev)
r600_cp_start(rdev);
ring->ready = true;
- r = radeon_ring_test(rdev, ring);
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
ring->ready = false;
return r;
@@ -2452,7 +2452,7 @@ int r600_startup(struct radeon_device *rdev)
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
+ rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -2493,7 +2493,7 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
@@ -2701,13 +2701,14 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
-int r600_ib_test(struct radeon_device *rdev, int ring)
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib *ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
int r;
+ int ring_index = radeon_ring_index(rdev, ring);
r = radeon_scratch_get(rdev, &scratch);
if (r) {
@@ -2715,7 +2716,7 @@ int r600_ib_test(struct radeon_device *rdev, int ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, ring, &ib, 256);
+ r = radeon_ib_get(rdev, ring_index, &ib, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2723,20 +2724,7 @@ int r600_ib_test(struct radeon_device *rdev, int ring)
ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib->ptr[2] = 0xDEADBEEF;
- ib->ptr[3] = PACKET2(0);
- ib->ptr[4] = PACKET2(0);
- ib->ptr[5] = PACKET2(0);
- ib->ptr[6] = PACKET2(0);
- ib->ptr[7] = PACKET2(0);
- ib->ptr[8] = PACKET2(0);
- ib->ptr[9] = PACKET2(0);
- ib->ptr[10] = PACKET2(0);
- ib->ptr[11] = PACKET2(0);
- ib->ptr[12] = PACKET2(0);
- ib->ptr[13] = PACKET2(0);
- ib->ptr[14] = PACKET2(0);
- ib->ptr[15] = PACKET2(0);
- ib->length_dw = 16;
+ ib->length_dw = 3;
r = radeon_ib_schedule(rdev, ib);
if (r) {
radeon_scratch_free(rdev, scratch);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index accc032..db38f58 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -30,20 +30,7 @@
#include "r600d.h"
#include "r600_blit_shaders.h"
-
-#define DI_PT_RECTLIST 0x11
-#define DI_INDEX_SIZE_16_BIT 0x0
-#define DI_SRC_SEL_AUTO_INDEX 0x2
-
-#define FMT_8 0x1
-#define FMT_5_6_5 0x8
-#define FMT_8_8_8_8 0x1a
-#define COLOR_8 0x1
-#define COLOR_5_6_5 0x8
-#define COLOR_8_8_8_8 0x1a
-
-#define RECT_UNIT_H 32
-#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
+#include "radeon_blit_common.h"
/* emits 21 on rv770+, 23 on r600 */
static void
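
The DI_*/FMT_*/COLOR_*/RECT_UNIT_* defines removed above are shared between the r600 and evergreen blit paths, so they move into a common header. A sketch of what radeon_blit_common.h presumably contains, with the values copied from the lines removed here; the include-guard name is an assumption.

#ifndef __RADEON_BLIT_COMMON_H__
#define __RADEON_BLIT_COMMON_H__

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a

#define RECT_UNIT_H           32
#define RECT_UNIT_W           (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)

#endif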
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 387fcc9..0ec3f20 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -52,15 +52,20 @@ struct r600_cs_track {
struct radeon_bo *cb_color_bo[8];
u64 cb_color_bo_mc[8];
u32 cb_color_bo_offset[8];
- struct radeon_bo *cb_color_frag_bo[8];
- struct radeon_bo *cb_color_tile_bo[8];
+ struct radeon_bo *cb_color_frag_bo[8]; /* unused */
+ struct radeon_bo *cb_color_tile_bo[8]; /* unused */
u32 cb_color_info[8];
- u32 cb_color_size_idx[8];
+ u32 cb_color_view[8];
+ u32 cb_color_size_idx[8]; /* unused */
u32 cb_target_mask;
- u32 cb_shader_mask;
+ u32 cb_shader_mask; /* unused */
u32 cb_color_size[8];
u32 vgt_strmout_en;
u32 vgt_strmout_buffer_en;
+ struct radeon_bo *vgt_strmout_bo[4];
+ u64 vgt_strmout_bo_mc[4]; /* unused */
+ u32 vgt_strmout_bo_offset[4];
+ u32 vgt_strmout_size[4];
u32 db_depth_control;
u32 db_depth_info;
u32 db_depth_size_idx;
@@ -69,13 +74,17 @@ struct r600_cs_track {
u32 db_offset;
struct radeon_bo *db_bo;
u64 db_bo_mc;
+ bool sx_misc_kill_all_prims;
+ bool cb_dirty;
+ bool db_dirty;
+ bool streamout_dirty;
};
#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
-#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
-#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
@@ -107,7 +116,7 @@ static const struct gpu_formats color_formats_table[] = {
/* 24-bit */
FMT_24_BIT(V_038004_FMT_8_8_8),
-
+
/* 32-bit */
FMT_32_BIT(V_038004_COLOR_32, 1),
FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
@@ -162,22 +171,22 @@ static const struct gpu_formats color_formats_table[] = {
[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};
-static bool fmt_is_valid_color(u32 format)
+bool r600_fmt_is_valid_color(u32 format)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
-
+
if (color_formats_table[format].valid_color)
return true;
return false;
}
-static bool fmt_is_valid_texture(u32 format, enum radeon_family family)
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;
-
+
if (family < color_formats_table[format].min_family)
return false;
@@ -187,7 +196,7 @@ static bool fmt_is_valid_texture(u32 format, enum radeon_family family)
return false;
}
-static int fmt_get_blocksize(u32 format)
+int r600_fmt_get_blocksize(u32 format)
{
if (format >= ARRAY_SIZE(color_formats_table))
return 0;
@@ -195,7 +204,7 @@ static int fmt_get_blocksize(u32 format)
return color_formats_table[format].blocksize;
}
-static int fmt_get_nblocksx(u32 format, u32 w)
+int r600_fmt_get_nblocksx(u32 format, u32 w)
{
unsigned bw;
@@ -209,7 +218,7 @@ static int fmt_get_nblocksx(u32 format, u32 w)
return (w + bw - 1) / bw;
}
-static int fmt_get_nblocksy(u32 format, u32 h)
+int r600_fmt_get_nblocksy(u32 format, u32 h)
{
unsigned bh;
@@ -256,7 +265,7 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values,
break;
case ARRAY_LINEAR_ALIGNED:
*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
- *height_align = tile_height;
+ *height_align = 1;
*depth_align = 1;
*base_align = values->group_size;
break;
@@ -269,10 +278,9 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values,
*base_align = values->group_size;
break;
case ARRAY_2D_TILED_THIN1:
- *pitch_align = max((u32)macro_tile_width,
- (u32)(((values->group_size / tile_height) /
- (values->blocksize * values->nsamples)) *
- values->nbanks)) * tile_width;
+ *pitch_align = max((u32)macro_tile_width * tile_width,
+ (u32)((values->group_size * values->nbanks) /
+ (values->blocksize * values->nsamples * tile_width)));
*height_align = macro_tile_height * tile_height;
*depth_align = 1;
*base_align = max(macro_tile_bytes,
@@ -296,12 +304,14 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->cb_color_size[i] = 0;
track->cb_color_size_idx[i] = 0;
track->cb_color_info[i] = 0;
+ track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
track->cb_color_bo_mc[i] = 0xFFFFFFFF;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
+ track->cb_dirty = true;
track->db_bo = NULL;
track->db_bo_mc = 0xFFFFFFFF;
/* assume the biggest format and that htile is enabled */
@@ -310,6 +320,16 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->db_depth_size = 0xFFFFFFFF;
track->db_depth_size_idx = 0;
track->db_depth_control = 0xFFFFFFFF;
+ track->db_dirty = true;
+
+ for (i = 0; i < 4; i++) {
+ track->vgt_strmout_size[i] = 0;
+ track->vgt_strmout_bo[i] = NULL;
+ track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+ track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
+ }
+ track->streamout_dirty = true;
+ track->sx_misc_kill_all_prims = false;
}
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
@@ -322,13 +342,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
u32 format;
+
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
- if (!fmt_is_valid_color(format)) {
+ if (!r600_fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
__func__, __LINE__, format,
i, track->cb_color_info[i]);
@@ -349,7 +370,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.blocksize = fmt_get_blocksize(format);
+ array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -393,7 +414,18 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
+ tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
+ switch (array_mode) {
+ default:
+ case V_0280A0_ARRAY_LINEAR_GENERAL:
+ case V_0280A0_ARRAY_LINEAR_ALIGNED:
+ tmp += track->cb_color_view[i] & 0xFF;
+ break;
+ case V_0280A0_ARRAY_1D_TILED_THIN1:
+ case V_0280A0_ARRAY_2D_TILED_THIN1:
+ tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
+ break;
+ }
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -403,10 +435,13 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
- array_mode,
+ dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
+ __func__, i, array_mode,
track->cb_color_bo_offset[i], tmp,
- radeon_bo_size(track->cb_color_bo[i]));
+ radeon_bo_size(track->cb_color_bo[i]),
+ pitch, height, r600_fmt_get_nblocksx(format, pitch),
+ r600_fmt_get_nblocksy(format, height),
+ r600_fmt_get_blocksize(format));
return -EINVAL;
}
}
@@ -430,143 +465,171 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* on legacy kernels we don't perform advanced checks */
if (p->rdev == NULL)
return 0;
- /* we don't support out buffer yet */
- if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
- dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
- return -EINVAL;
+
+ /* check streamout */
+ if (track->streamout_dirty && track->vgt_strmout_en) {
+ for (i = 0; i < 4; i++) {
+ if (track->vgt_strmout_buffer_en & (1 << i)) {
+ if (track->vgt_strmout_bo[i]) {
+ u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+ (u64)track->vgt_strmout_size[i];
+ if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+ DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
+ return -EINVAL;
+ }
+ } else {
+ dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ return -EINVAL;
+ }
+ }
+ }
+ track->streamout_dirty = false;
}
+
+ if (track->sx_misc_kill_all_prims)
+ return 0;
+
/* check that we have a cb for each enabled target, we don't check
* shader_mask because it seems mesa isn't always setting it :(
*/
- tmp = track->cb_target_mask;
- for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
- /* at least one component is enabled */
- if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
- return -EINVAL;
+ if (track->cb_dirty) {
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* perform rewrite of CB_COLOR[0-7]_SIZE */
+ r = r600_cs_track_validate_cb(p, i);
+ if (r)
+ return r;
}
- /* perform rewrite of CB_COLOR[0-7]_SIZE */
- r = r600_cs_track_validate_cb(p, i);
- if (r)
- return r;
}
+ track->cb_dirty = false;
}
- /* Check depth buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
- G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, size, slice_tile_max;
- u32 height, height_align, pitch, pitch_align, depth_align;
- u64 base_offset, base_align;
- struct array_mode_checker array_check;
- int array_mode;
-
- if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
- return -EINVAL;
- }
- if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
- dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
- return -EINVAL;
- }
- switch (G_028010_FORMAT(track->db_depth_info)) {
- case V_028010_DEPTH_16:
- bpe = 2;
- break;
- case V_028010_DEPTH_X8_24:
- case V_028010_DEPTH_8_24:
- case V_028010_DEPTH_X8_24_FLOAT:
- case V_028010_DEPTH_8_24_FLOAT:
- case V_028010_DEPTH_32_FLOAT:
- bpe = 4;
- break;
- case V_028010_DEPTH_X24_8_32_FLOAT:
- bpe = 8;
- break;
- default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
- return -EINVAL;
- }
- if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
- return -EINVAL;
- }
- tmp = radeon_bo_size(track->db_bo) - track->db_offset;
- tmp = (tmp / bpe) >> 6;
- if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
+
+ if (track->db_dirty) {
+ /* Check depth buffer */
+ if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control)) {
+ u32 nviews, bpe, ntiles, size, slice_tile_max;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
}
- ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
- } else {
- size = radeon_bo_size(track->db_bo);
- /* pitch in pixels */
- pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
- slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- slice_tile_max *= 64;
- height = slice_tile_max / pitch;
- if (height > 8192)
- height = 8192;
- base_offset = track->db_bo_mc + track->db_offset;
- array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
- array_check.array_mode = array_mode;
- array_check.group_size = track->group_size;
- array_check.nbanks = track->nbanks;
- array_check.npipes = track->npipes;
- array_check.nsamples = track->nsamples;
- array_check.blocksize = bpe;
- if (r600_get_array_mode_alignment(&array_check,
- &pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
return -EINVAL;
}
- switch (array_mode) {
- case V_028010_ARRAY_1D_TILED_THIN1:
- /* don't break userspace */
- height &= ~0x7;
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
break;
- case V_028010_ARRAY_2D_TILED_THIN1:
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
return -EINVAL;
}
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ size = radeon_bo_size(track->db_bo);
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.blocksize = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ /* don't break userspace */
+ height &= ~0x7;
+ break;
+ case V_028010_ARRAY_2D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
- return -EINVAL;
- }
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
+ return -EINVAL;
+ }
- ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
- tmp = ntiles * bpe * 64 * nviews;
- if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
}
}
+ track->db_dirty = false;
}
return 0;
}
@@ -939,6 +1002,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028800_DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
@@ -959,24 +1023,66 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
}
- } else
+ } else {
track->db_depth_info = radeon_get_ib_value(p, idx);
+ }
+ track->db_dirty = true;
break;
case R_028004_DB_DEPTH_VIEW:
track->db_depth_view = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
break;
case R_028000_DB_DEPTH_SIZE:
track->db_depth_size = radeon_get_ib_value(p, idx);
track->db_depth_size_idx = idx;
+ track->db_dirty = true;
break;
case R_028AB0_VGT_STRMOUT_EN:
track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
break;
case R_028B20_VGT_STRMOUT_BUFFER_EN:
track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_BASE_0:
+ case VGT_STRMOUT_BUFFER_BASE_1:
+ case VGT_STRMOUT_BUFFER_BASE_2:
+ case VGT_STRMOUT_BUFFER_BASE_3:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+ track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->vgt_strmout_bo[tmp] = reloc->robj;
+ track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_SIZE_0:
+ case VGT_STRMOUT_BUFFER_SIZE_1:
+ case VGT_STRMOUT_BUFFER_SIZE_2:
+ case VGT_STRMOUT_BUFFER_SIZE_3:
+ tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+ /* size in register is DWs, convert to bytes */
+ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+ track->streamout_dirty = true;
+ break;
+ case CP_COHER_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case R_028238_CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
break;
case R_02823C_CB_SHADER_MASK:
track->cb_shader_mask = radeon_get_ib_value(p, idx);
@@ -984,6 +1090,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_028C04_PA_SC_AA_CONFIG:
tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
track->nsamples = 1 << tmp;
+ track->cb_dirty = true;
break;
case R_0280A0_CB_COLOR0_INFO:
case R_0280A4_CB_COLOR1_INFO:
@@ -1013,6 +1120,19 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
}
+ track->cb_dirty = true;
+ break;
+ case R_028080_CB_COLOR0_VIEW:
+ case R_028084_CB_COLOR1_VIEW:
+ case R_028088_CB_COLOR2_VIEW:
+ case R_02808C_CB_COLOR3_VIEW:
+ case R_028090_CB_COLOR4_VIEW:
+ case R_028094_CB_COLOR5_VIEW:
+ case R_028098_CB_COLOR6_VIEW:
+ case R_02809C_CB_COLOR7_VIEW:
+ tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
break;
case R_028060_CB_COLOR0_SIZE:
case R_028064_CB_COLOR1_SIZE:
@@ -1025,6 +1145,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
track->cb_color_size_idx[tmp] = idx;
+ track->cb_dirty = true;
break;
/* These registers were added late, there is userspace
* which does provide relocation for those but set
@@ -1107,6 +1228,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
+ track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -1119,6 +1241,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
track->db_bo_mc = reloc->lobj.gpu_offset;
+ track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
case SQ_PGM_START_FS:
@@ -1191,6 +1314,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
+ case SX_MISC:
+ track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+ break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
@@ -1198,7 +1324,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return 0;
}
-static unsigned mip_minify(unsigned size, unsigned level)
+unsigned r600_mip_minify(unsigned size, unsigned level)
{
unsigned val;
@@ -1220,22 +1346,22 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned nlevels = llevel - blevel + 1;
*l0_size = -1;
- blocksize = fmt_get_blocksize(format);
+ blocksize = r600_fmt_get_blocksize(format);
- w0 = mip_minify(w0, 0);
- h0 = mip_minify(h0, 0);
- d0 = mip_minify(d0, 0);
+ w0 = r600_mip_minify(w0, 0);
+ h0 = r600_mip_minify(h0, 0);
+ d0 = r600_mip_minify(d0, 0);
for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
- width = mip_minify(w0, i);
- nbx = fmt_get_nblocksx(format, width);
+ width = r600_mip_minify(w0, i);
+ nbx = r600_fmt_get_nblocksx(format, width);
nbx = round_up(nbx, block_align);
- height = mip_minify(h0, i);
- nby = fmt_get_nblocksy(format, height);
+ height = r600_mip_minify(h0, i);
+ nby = r600_fmt_get_nblocksy(format, height);
nby = round_up(nby, height_align);
- depth = mip_minify(d0, i);
+ depth = r600_mip_minify(d0, i);
size = nbx * nby * blocksize;
if (nfaces)
@@ -1327,7 +1453,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
format = G_038004_DATA_FORMAT(word1);
- if (!fmt_is_valid_texture(format, p->family)) {
+ if (!r600_fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
@@ -1340,7 +1466,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
- array_check.blocksize = fmt_get_blocksize(format);
+ array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1373,6 +1499,10 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
llevel = G_038014_LAST_LEVEL(word1);
+ if (blevel > llevel) {
+ dev_warn(p->dev, "texture blevel %d > llevel %d\n",
+ blevel, llevel);
+ }
if (array == 1) {
barray = G_038014_BASE_ARRAY(word1);
larray = G_038014_LAST_ARRAY(word1);
@@ -1384,8 +1514,10 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
if ((l0_size + word2) > radeon_bo_size(texture)) {
- dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
- w0, h0, format, word2, l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+ w0, h0, pitch_align, height_align,
+ array_check.array_mode, format, word2,
+ l0_size, radeon_bo_size(texture));
dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
@@ -1398,6 +1530,22 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return 0;
}
+static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ u32 m, i;
+
+ i = (reg >> 7);
+ if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(r600_reg_safe_bm[i] & m))
+ return true;
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+}
+
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
@@ -1420,6 +1568,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
int pred_op;
int tmp;
+ uint64_t offset;
+
if (pkt->count != 1) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1443,8 +1593,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ offset = reloc->lobj.gpu_offset +
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
+
+ ib[idx + 0] = offset;
+ ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
}
break;
@@ -1468,6 +1622,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_DRAW_INDEX:
+ {
+ uint64_t offset;
if (pkt->count != 3) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1477,14 +1633,21 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+
r = r600_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
+ }
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
@@ -1515,13 +1678,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
/* bit 4 is reg (0) or mem (1) */
if (idx_value & 0x10) {
+ uint64_t offset;
+
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
case PACKET3_SURFACE_SYNC:
@@ -1546,16 +1716,25 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (pkt->count) {
+ uint64_t offset;
+
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffff8;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
case PACKET3_EVENT_WRITE_EOP:
+ {
+ uint64_t offset;
+
if (pkt->count != 4) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -1565,9 +1744,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffffc;
+ ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
break;
+ }
case PACKET3_SET_CONFIG_REG:
start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
end_reg = 4 * pkt->count + start_reg - 4;
@@ -1652,6 +1837,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1+(i*7)+3] += mip_offset;
break;
case SQ_TEX_VTX_VALID_BUFFER:
+ {
+ uint64_t offset64;
/* vtx base */
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -1664,11 +1851,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* force size to size of the buffer */
dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
size + offset, radeon_bo_size(reloc->robj));
- ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
+ ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
}
- ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
- ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+
+ offset64 = reloc->lobj.gpu_offset + offset;
+ ib[idx+1+(i*8)+0] = offset64;
+ ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
+ (upper_32_bits(offset64) & 0xff);
break;
+ }
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
@@ -1743,6 +1934,104 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ return -EINVAL;
+ }
+ /* Updating memory at DST_ADDRESS. */
+ if (idx_value & 0x1) {
+ u64 offset;
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ }
+ /* Reading data from SRC_ADDRESS. */
+ if (((idx_value >> 1) & 0x3) == 2) {
+ u64 offset;
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad COPY_DW (invalid count)\n");
+ return -EINVAL;
+ }
+ if (idx_value & 0x1) {
+ u64 offset;
+ /* SRC is memory. */
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* SRC is a reg. */
+ reg = radeon_get_ib_value(p, idx+1) << 2;
+ if (!r600_is_safe_reg(p, reg, idx+1))
+ return -EINVAL;
+ }
+ if (idx_value & 0x2) {
+ u64 offset;
+ /* DST is memory. */
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* DST is a reg. */
+ reg = radeon_get_ib_value(p, idx+3) << 2;
+ if (!r600_is_safe_reg(p, reg, idx+3))
+ return -EINVAL;
+ }
+ break;
case PACKET3_NOP:
break;
default:
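
The new r600_is_safe_reg() above treats r600_reg_safe_bm as a bitmap with one bit per dword register, 32 registers (128 bytes of register space) per bitmap word: word index reg >> 7, bit (reg >> 2) & 31. A clear bit means COPY_DW may touch the register directly; a set bit (or an out-of-range register) is rejected as forbidden. A sketch of the same lookup with the bitmap passed in explicitly; the worked example in the comment uses register 0x28B20 (VGT_STRMOUT_BUFFER_EN).

/*
 *   reg 0x28B20:  word = 0x28B20 >> 7        = 0x516
 *                 bit  = (0x28B20 >> 2) & 31 = 8
 */
static bool example_reg_is_safe(const u32 *safe_bm, unsigned bm_words, u32 reg)
{
	u32 word = reg >> 7;
	u32 mask = 1u << ((reg >> 2) & 31);

	if (word >= bm_words)
		return false;	/* out of range: treat as forbidden */
	return !(safe_bm[word] & mask);
}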
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 9b23670..8ae328f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -78,6 +78,20 @@
#define CB_COLOR0_SIZE 0x28060
#define CB_COLOR0_VIEW 0x28080
+#define R_028080_CB_COLOR0_VIEW 0x028080
+#define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028080_SLICE_START 0xFFFFF800
+#define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028080_SLICE_MAX 0xFF001FFF
+#define R_028084_CB_COLOR1_VIEW 0x028084
+#define R_028088_CB_COLOR2_VIEW 0x028088
+#define R_02808C_CB_COLOR3_VIEW 0x02808C
+#define R_028090_CB_COLOR4_VIEW 0x028090
+#define R_028094_CB_COLOR5_VIEW 0x028094
+#define R_028098_CB_COLOR6_VIEW 0x028098
+#define R_02809C_CB_COLOR7_VIEW 0x02809C
#define CB_COLOR0_INFO 0x280a0
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
@@ -493,6 +507,11 @@
#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
+#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
+
#define VGT_STRMOUT_EN 0x28AB0
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
@@ -835,6 +854,7 @@
# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
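
The CB_COLORn_VIEW defines added above feed the colour-buffer size check in r600_cs_track_validate_cb(): for tiled surfaces the required size grows by SLICE_MAX extra slices on top of the first one, while the linear cases mirror the checker's "view & 0xFF" adjustment. A small sketch of that bound, assuming 'slice_size' is the single-slice size already computed from pitch, height and blocksize; nothing here is new API.

static u64 example_cb_bytes_required(u64 slice_size, u32 cb_color_view,
				     bool tiled)
{
	if (!tiled)
		/* linear array modes: same "view & 0xFF" term the checker adds */
		return slice_size + (cb_color_view & 0xFF);

	/* 1D/2D tiled: SLICE_MAX extra slices beyond the first */
	return slice_size + (u64)G_028080_SLICE_MAX(cb_color_view) * slice_size;
}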
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1668ec1..d2870a0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,9 @@ extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
+extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+ unsigned *bankh, unsigned *mtaspect,
+ unsigned *tile_split);
/*
* Fences.
@@ -411,9 +414,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
bool discardable, bool kernel,
struct drm_gem_object **obj);
-int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
- uint64_t *gpu_addr);
-void radeon_gem_object_unpin(struct drm_gem_object *obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -780,7 +780,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
-int radeon_ib_test(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -833,7 +832,6 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
- bool sync_to_ring[RADEON_NUM_RINGS];
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
@@ -1132,57 +1130,6 @@ struct radeon_asic {
void (*vga_set_state)(struct radeon_device *rdev, bool state);
bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
- void (*gart_tlb_flush)(struct radeon_device *rdev);
- int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
- int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
- void (*cp_fini)(struct radeon_device *rdev);
- void (*cp_disable)(struct radeon_device *rdev);
- void (*ring_start)(struct radeon_device *rdev);
-
- struct {
- void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
- void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
- struct radeon_semaphore *semaphore, bool emit_wait);
- } ring[RADEON_NUM_RINGS];
-
- int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*irq_set)(struct radeon_device *rdev);
- int (*irq_process)(struct radeon_device *rdev);
- u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
- int (*cs_parse)(struct radeon_cs_parser *p);
- int (*copy_blit)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence);
- int (*copy_dma)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence);
- int (*copy)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence *fence);
- uint32_t (*get_engine_clock)(struct radeon_device *rdev);
- void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
- uint32_t (*get_memory_clock)(struct radeon_device *rdev);
- void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
- int (*get_pcie_lanes)(struct radeon_device *rdev);
- void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
- void (*set_clock_gating)(struct radeon_device *rdev, int enable);
- int (*set_surface_reg)(struct radeon_device *rdev, int reg,
- uint32_t tiling_flags, uint32_t pitch,
- uint32_t offset, uint32_t obj_size);
- void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
- void (*bandwidth_update)(struct radeon_device *rdev);
- void (*hpd_init)(struct radeon_device *rdev);
- void (*hpd_fini)(struct radeon_device *rdev);
- bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
- void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
/* ioctl hw specific callback. Some hw might want to perform special
* operation on specific ioctl. For instance on wait idle some hw
* might want to perform and HDP flush through MMIO as it seems that
@@ -1190,17 +1137,99 @@ struct radeon_asic {
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ /* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
+ /* wait for mc_idle */
+ int (*mc_wait_for_idle)(struct radeon_device *rdev);
+ /* gart */
+ struct {
+ void (*tlb_flush)(struct radeon_device *rdev);
+ int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+ } gart;
+ /* ring specific callbacks */
+ struct {
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+ void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ } ring[RADEON_NUM_RINGS];
+ /* irqs */
+ struct {
+ int (*set)(struct radeon_device *rdev);
+ int (*process)(struct radeon_device *rdev);
+ } irq;
+ /* displays */
+ struct {
+ /* display watermarks */
+ void (*bandwidth_update)(struct radeon_device *rdev);
+ /* get frame count */
+ u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+ /* wait for vblank */
+ void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+ } display;
+ /* copy functions for bo handling */
+ struct {
+ int (*blit)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence);
+ u32 blit_ring_index;
+ int (*dma)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence);
+ u32 dma_ring_index;
+ /* method used for bo copy */
+ int (*copy)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence);
+ /* ring used for bo copies */
+ u32 copy_ring_index;
+ } copy;
+ /* surfaces */
+ struct {
+ int (*set_reg)(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size);
+ void (*clear_reg)(struct radeon_device *rdev, int reg);
+ } surface;
+ /* hotplug detect */
+ struct {
+ void (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+ bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ } hpd;
/* power management */
- void (*pm_misc)(struct radeon_device *rdev);
- void (*pm_prepare)(struct radeon_device *rdev);
- void (*pm_finish)(struct radeon_device *rdev);
- void (*pm_init_profile)(struct radeon_device *rdev);
- void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+ struct {
+ void (*misc)(struct radeon_device *rdev);
+ void (*prepare)(struct radeon_device *rdev);
+ void (*finish)(struct radeon_device *rdev);
+ void (*init_profile)(struct radeon_device *rdev);
+ void (*get_dynpm_state)(struct radeon_device *rdev);
+ uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+ void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+ uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+ void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
+ void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+ void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+ } pm;
/* pageflipping */
- void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
- u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
- void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+ struct {
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+ } pflip;
};
/*
@@ -1491,8 +1520,6 @@ struct radeon_device {
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
- /* ring used for bo copies */
- u32 copy_ring;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1648,47 +1675,53 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
-#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
-#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
-#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
-#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
+#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
-#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
-#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
-#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
-#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
-#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
-#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
-#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
-#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
-#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
-#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
-#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
-#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
-#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
-#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
-#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
-#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
+#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
+#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
+#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
+#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
+#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
-#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
-#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
-#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
-#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
-#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
-#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
/* Common functions */
/* AGP */
@@ -1750,6 +1783,16 @@ int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);
/*
+ * r600 cs checking helper
+ */
+unsigned r600_mip_minify(unsigned size, unsigned level);
+bool r600_fmt_is_valid_color(u32 format);
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
+int r600_fmt_get_blocksize(u32 format);
+int r600_fmt_get_nblocksx(u32 format, u32 w);
+int r600_fmt_get_nblocksy(u32 format, u32 h);
+
+/*
* r600 functions used by radeon_encoder.c
*/
extern void r600_hdmi_enable(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 36a6192..479c89e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -114,13 +114,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
@@ -136,48 +136,70 @@ static struct radeon_asic r100_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r100_pci_gart_tlb_flush,
+ .set_page = &r100_pci_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .cs_parse = &r100_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = NULL,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &r100_pm_misc,
- .pm_prepare = &r100_pm_prepare,
- .pm_finish = &r100_pm_finish,
- .pm_init_profile = &r100_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
};
static struct radeon_asic r200_asic = {
@@ -188,47 +210,70 @@ static struct radeon_asic r200_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r100_pci_gart_tlb_flush,
+ .set_page = &r100_pci_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .cs_parse = &r100_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &r100_pm_misc,
- .pm_prepare = &r100_pm_prepare,
- .pm_finish = &r100_pm_finish,
- .pm_init_profile = &r100_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
};
static struct radeon_asic r300_asic = {
@@ -239,48 +284,70 @@ static struct radeon_asic r300_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r100_pci_gart_tlb_flush,
+ .set_page = &r100_pci_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &r100_pm_misc,
- .pm_prepare = &r100_pm_prepare,
- .pm_finish = &r100_pm_finish,
- .pm_init_profile = &r100_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
};
static struct radeon_asic r300_asic_pcie = {
@@ -291,47 +358,70 @@ static struct radeon_asic r300_asic_pcie = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &r100_pm_misc,
- .pm_prepare = &r100_pm_prepare,
- .pm_finish = &r100_pm_finish,
- .pm_init_profile = &r100_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
};
static struct radeon_asic r420_asic = {
@@ -342,48 +432,70 @@ static struct radeon_asic r420_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &r100_pm_misc,
- .pm_prepare = &r100_pm_prepare,
- .pm_finish = &r100_pm_finish,
- .pm_init_profile = &r420_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
};
static struct radeon_asic rs400_asic = {
@@ -394,48 +506,70 @@ static struct radeon_asic rs400_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
- .gart_tlb_flush = &rs400_gart_tlb_flush,
- .gart_set_page = &rs400_gart_set_page,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rs400_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rs400_gart_tlb_flush,
+ .set_page = &rs400_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &r100_pm_misc,
- .pm_prepare = &r100_pm_prepare,
- .pm_finish = &r100_pm_finish,
- .pm_init_profile = &r100_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &r100_pre_page_flip,
- .page_flip = &r100_page_flip,
- .post_page_flip = &r100_post_page_flip,
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
};
static struct radeon_asic rs600_asic = {
@@ -446,48 +580,70 @@ static struct radeon_asic rs600_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
- .gart_tlb_flush = &rs600_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rs600_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rs600_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rs600_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &rs600_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &r420_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rs600_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic rs690_asic = {
@@ -498,48 +654,70 @@ static struct radeon_asic rs690_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
- .gart_tlb_flush = &rs400_gart_tlb_flush,
- .gart_set_page = &rs400_gart_set_page,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rs690_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rs400_gart_tlb_flush,
+ .set_page = &rs400_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r200_copy_dma,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rs690_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &rs600_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &r420_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r200_copy_dma,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic rv515_asic = {
@@ -550,48 +728,70 @@ static struct radeon_asic rv515_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rv515_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &rs600_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &r420_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic r520_asic = {
@@ -602,48 +802,70 @@ static struct radeon_asic r520_asic = {
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r520_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
}
},
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
- .gui_idle = &r100_gui_idle,
- .pm_misc = &rs600_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &r420_pm_init_profile,
- .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rv515_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic r600_asic = {
@@ -654,47 +876,69 @@ static struct radeon_asic r600_asic = {
.vga_set_state = &r600_vga_set_state,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r600_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
}
},
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &r600_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &r600_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &r600_irq_set,
+ .process = &r600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rv515_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r600_hpd_init,
+ .fini = &r600_hpd_fini,
+ .sense = &r600_hpd_sense,
+ .set_polarity = &r600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic rs780_asic = {
@@ -705,47 +949,69 @@ static struct radeon_asic rs780_asic = {
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r600_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
}
},
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = NULL,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rs690_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &r600_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &rs780_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rs600_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &r600_irq_set,
+ .process = &r600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rs690_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r600_hpd_init,
+ .fini = &r600_hpd_fini,
+ .sense = &r600_hpd_sense,
+ .set_polarity = &r600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &rs780_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic rv770_asic = {
@@ -756,47 +1022,69 @@ static struct radeon_asic rv770_asic = {
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r600_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
}
},
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &rv770_pm_misc,
- .pm_prepare = &rs600_pm_prepare,
- .pm_finish = &rs600_pm_finish,
- .pm_init_profile = &r600_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &rs600_pre_page_flip,
- .page_flip = &rv770_page_flip,
- .post_page_flip = &rs600_post_page_flip,
+ .irq = {
+ .set = &r600_irq_set,
+ .process = &r600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rv515_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r600_hpd_init,
+ .fini = &r600_hpd_fini,
+ .sense = &r600_hpd_sense,
+ .set_polarity = &r600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rv770_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rv770_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
};
static struct radeon_asic evergreen_asic = {
@@ -807,47 +1095,69 @@ static struct radeon_asic evergreen_asic = {
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
}
},
- .irq_set = &evergreen_irq_set,
- .irq_process = &evergreen_irq_process,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .cs_parse = &evergreen_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &evergreen_bandwidth_update,
- .hpd_init = &evergreen_hpd_init,
- .hpd_fini = &evergreen_hpd_fini,
- .hpd_sense = &evergreen_hpd_sense,
- .hpd_set_polarity = &evergreen_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &evergreen_pm_misc,
- .pm_prepare = &evergreen_pm_prepare,
- .pm_finish = &evergreen_pm_finish,
- .pm_init_profile = &r600_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
};
static struct radeon_asic sumo_asic = {
@@ -858,47 +1168,69 @@ static struct radeon_asic sumo_asic = {
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
- }
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ },
+ },
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &sumo_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
},
- .irq_set = &evergreen_irq_set,
- .irq_process = &evergreen_irq_process,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .cs_parse = &evergreen_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = NULL,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &evergreen_bandwidth_update,
- .hpd_init = &evergreen_hpd_init,
- .hpd_fini = &evergreen_hpd_fini,
- .hpd_sense = &evergreen_hpd_sense,
- .hpd_set_polarity = &evergreen_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &evergreen_pm_misc,
- .pm_prepare = &evergreen_pm_prepare,
- .pm_finish = &evergreen_pm_finish,
- .pm_init_profile = &sumo_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic btc_asic = {
@@ -909,47 +1241,69 @@ static struct radeon_asic btc_asic = {
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
}
},
- .irq_set = &evergreen_irq_set,
- .irq_process = &evergreen_irq_process,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .cs_parse = &evergreen_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &evergreen_bandwidth_update,
- .hpd_init = &evergreen_hpd_init,
- .hpd_fini = &evergreen_hpd_fini,
- .hpd_sense = &evergreen_hpd_sense,
- .hpd_set_polarity = &evergreen_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &evergreen_pm_misc,
- .pm_prepare = &evergreen_pm_prepare,
- .pm_finish = &evergreen_pm_finish,
- .pm_init_profile = &r600_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
};
static const struct radeon_vm_funcs cayman_vm_funcs = {
@@ -970,60 +1324,88 @@ static struct radeon_asic cayman_asic = {
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
}
},
- .irq_set = &evergreen_irq_set,
- .irq_process = &evergreen_irq_process,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .cs_parse = &evergreen_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = NULL,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &evergreen_bandwidth_update,
- .hpd_init = &evergreen_hpd_init,
- .hpd_fini = &evergreen_hpd_fini,
- .hpd_sense = &evergreen_hpd_sense,
- .hpd_set_polarity = &evergreen_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
- .gui_idle = &r600_gui_idle,
- .pm_misc = &evergreen_pm_misc,
- .pm_prepare = &evergreen_pm_prepare,
- .pm_finish = &evergreen_pm_finish,
- .pm_init_profile = &r600_pm_init_profile,
- .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
};
int radeon_asic_init(struct radeon_device *rdev)
@@ -1036,9 +1418,6 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 2;
- /* set the ring used for bo copies */
- rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
-
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -1068,10 +1447,10 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &r420_asic;
/* handle macs */
if (rdev->bios == NULL) {
- rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
- rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
- rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
- rdev->asic->set_memory_clock = NULL;
+ rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
+ rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
+ rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
+ rdev->asic->pm.set_memory_clock = NULL;
}
break;
case CHIP_RS400:
@@ -1152,8 +1531,8 @@ int radeon_asic_init(struct radeon_device *rdev)
}
if (rdev->flags & RADEON_IS_IGP) {
- rdev->asic->get_memory_clock = NULL;
- rdev->asic->set_memory_clock = NULL;
+ rdev->asic->pm.get_memory_clock = NULL;
+ rdev->asic->pm.set_memory_clock = NULL;
}
return 0;
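
Note: the hunks above restructure struct radeon_asic so that related callbacks live in named sub-structs (gart, ring, irq, display, copy, surface, hpd, pm, pflip) instead of one flat list of function pointers, and callers move to the grouped fields, as the radeon_asic_init() fix-ups already show. A minimal sketch of the access change (illustration only, not part of the patch):

	/* before: flat function-pointer field */
	if (rdev->asic->get_memory_clock)
		mclk = rdev->asic->get_memory_clock(rdev);

	/* after: the same callback, grouped under the pm sub-struct */
	if (rdev->asic->pm.get_memory_clock)
		mclk = rdev->asic->pm.get_memory_clock(rdev);
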
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 6304aef..b8f0a16 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -63,7 +63,7 @@ int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_ring_start(struct radeon_device *rdev);
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -109,7 +109,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_test(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -139,6 +139,8 @@ extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
@@ -159,7 +161,7 @@ extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
-extern void r300_ring_start(struct radeon_device *rdev);
+extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
@@ -176,6 +178,7 @@ extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -206,6 +209,7 @@ int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
+extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rs600.
@@ -236,7 +240,8 @@ extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
-
+extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rs690,rs740
@@ -251,6 +256,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2);
+extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rv515
@@ -267,7 +273,7 @@ int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-void rv515_ring_start(struct radeon_device *rdev);
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
@@ -278,13 +284,14 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_clock_startup(struct radeon_device *rdev);
void rv515_debugfs(struct radeon_device *rdev);
-
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -312,7 +319,7 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev, int ring);
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
@@ -375,6 +382,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -423,8 +431,10 @@ extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
/*
* cayman
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1f53ae7..7354137 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -442,6 +442,20 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+ if ((dev->pdev->device == 0x9802) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ *line_mux = 0x3103;
+ } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
+
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 815f234..fef7b72 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -43,17 +43,19 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r)
- return r;
-
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
+ r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
+ if (r)
+ return r;
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
+ r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
+ if (r)
+ return r;
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
fence);
@@ -129,7 +131,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
/* r100 doesn't have dma engine so skip the test */
/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
/* skip it as well if domains are the same */
- if ((rdev->asic->copy_dma) && (sdomain != ddomain)) {
+ if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n);
if (time < 0)
@@ -208,22 +210,22 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
break;
case 3:
/* GTT to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 65536; i <<= 1)
- radeon_benchmark_move(rdev, i*1024,
+ for (i = 1; i <= 16384; i <<= 1)
+ radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 4:
/* VRAM to GTT, buffer size sweep, powers of 2 */
- for (i = 1; i <= 65536; i <<= 1)
- radeon_benchmark_move(rdev, i*1024,
+ for (i = 1; i <= 16384; i <<= 1)
+ radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 5:
/* VRAM to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 65536; i <<= 1)
- radeon_benchmark_move(rdev, i*1024,
+ for (i = 1; i <= 16384; i <<= 1)
+ radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
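
Note on the sweep bounds: assuming RADEON_GPU_PAGE_SIZE is 4096 bytes, the new loops run from 1 to 16384 GPU pages, i.e. from 4 KiB up to 16384 * 4096 B = 64 MiB, the same upper bound as the old 65536 * 1024 B sweep, but every tested size is now a whole number of GPU pages, which matches the size / RADEON_GPU_PAGE_SIZE argument passed to the copy paths above.
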
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
new file mode 100644
index 0000000..4ecbe72
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_blit_common.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ * Copyright 2012 Alcatel-Lucent, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RADEON_BLIT_COMMON_H__
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
+
+#define RECT_UNIT_H 32
+#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
+
+#define __RADEON_BLIT_COMMON_H__
+#endif
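
Note: RECT_UNIT_W works out to RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H = 4096 / 4 / 32 = 32 (assuming 4 KiB GPU pages), so one RECT_UNIT_W x RECT_UNIT_H rectangle of 32-bit pixels covers exactly one GPU page. The include guard is laid out unconventionally, with the #define placed just before the closing #endif rather than directly after the #ifndef, but it still guards correctly because all of the content sits inside the #ifndef block.
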
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index b6e18c8..6ae0c75 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -334,7 +334,7 @@ void radeon_get_clock_info(struct drm_device *dev)
if (!rdev->clock.default_sclk)
rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
- if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+ if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
rdev->pm.current_sclk = rdev->clock.default_sclk;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8c9a811..64774ac 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -827,6 +827,27 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
return ret;
}
+static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status status;
+
+ /* We only trust HPD on R600 and newer ASICs. */
+ if (rdev->family >= CHIP_R600
+ && radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+ if (connector->status == status)
+ return true;
+ }
+
+ return false;
+}
+
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
@@ -851,6 +872,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
+ if (!force && radeon_check_hpd_status_unchanged(connector))
+ return connector->status;
+
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
if (dret) {
@@ -946,6 +970,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
encoder = obj_to_encoder(obj);
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+ encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+ continue;
+
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
if (ret != connector_status_connected) {
@@ -1250,6 +1278,9 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ if (!force && radeon_check_hpd_status_unchanged(connector))
+ return connector->status;
+
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
	}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 72ae826..0ebb7d4 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2115,6 +2115,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
+ pci_set_master(dev->pdev);
+
if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
else if (pci_is_pcie(dev->pdev))
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e64bec4..d9d9f5a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,12 +85,6 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
radeon_bo_list_add_object(&p->relocs[i].lobj,
&p->validated);
- if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
- struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
- if (!radeon_fence_signaled(fence)) {
- p->sync_to_ring[fence->ring] = true;
- }
- }
} else
p->relocs[i].handle = 0;
}
@@ -118,11 +112,24 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
+ bool sync_to_ring[RADEON_NUM_RINGS] = { };
int i, r;
+ for (i = 0; i < p->nrelocs; i++) {
+ if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
+ continue;
+
+ if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
+ struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
+ if (!radeon_fence_signaled(fence)) {
+ sync_to_ring[fence->ring] = true;
+ }
+ }
+ }
+
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
/* no need to sync to our own or unused rings */
- if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
+ if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
continue;
if (!p->ib->fence->semaphore) {
@@ -236,20 +243,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if ((p->cs_flags & RADEON_CS_USE_VM) &&
!p->rdev->vm_manager.enabled) {
DRM_ERROR("VM not active on asic!\n");
- if (p->chunk_relocs_idx != -1)
- kfree(p->chunks[p->chunk_relocs_idx].kdata);
- if (p->chunk_flags_idx != -1)
- kfree(p->chunks[p->chunk_flags_idx].kdata);
return -EINVAL;
}
- if (radeon_cs_get_ring(p, ring, priority)) {
- if (p->chunk_relocs_idx != -1)
- kfree(p->chunks[p->chunk_relocs_idx].kdata);
- if (p->chunk_flags_idx != -1)
- kfree(p->chunks[p->chunk_flags_idx].kdata);
+ if (radeon_cs_get_ring(p, ring, priority))
return -EINVAL;
- }
/* deal with non-vm */
@@ -264,11 +262,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
- kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
return -ENOMEM;
- }
p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
p->chunks[p->chunk_ib_idx].last_copied_page = -1;
@@ -341,7 +336,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
parser->ib->length_dw = ib_chunk->length_dw;
- r = radeon_cs_parse(parser);
+ r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
return r;
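
Note: radeon_cs_sync_rings() now derives the rings to synchronize against from the relocations at submit time instead of a flag array filled in during radeon_cs_parser_relocs(), and the command-stream parser call gains (rdev, ring) arguments so parsing can be dispatched per ring. The dispatch wrapper itself is not shown in this excerpt; presumably it is a thin accessor over the new per-ring callback, along these lines (an assumption, for illustration only):

	/* hypothetical wrapper over the new ring[].cs_parse callback */
	#define radeon_cs_parse(rdev, r, p) ((rdev)->asic->ring[(r)].cs_parse((p)))
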
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index fde25c0..42acc64 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -151,7 +151,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t height)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
+ struct radeon_bo *robj;
uint64_t gpu_addr;
int ret;
@@ -173,7 +175,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
}
- ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ robj = gem_to_radeon_bo(obj);
+ ret = radeon_bo_reserve(robj, false);
+ if (unlikely(ret != 0))
+ goto fail;
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &gpu_addr);
+ radeon_bo_unreserve(robj);
if (ret)
goto fail;
@@ -181,14 +191,18 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
radeon_crtc->cursor_height = height;
radeon_lock_cursor(crtc, true);
- /* XXX only 27 bit offset for legacy cursor */
radeon_set_cursor(crtc, obj, gpu_addr);
radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
unpin:
if (radeon_crtc->cursor_bo) {
- radeon_gem_object_unpin(radeon_crtc->cursor_bo);
+ robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ ret = radeon_bo_reserve(robj, false);
+ if (likely(ret == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3d31433..1ebcef2 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -303,8 +303,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
if (update_pending &&
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
&vpos, &hpos)) &&
- (vpos >=0) &&
- (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
+ ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
+ (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+ /* crtc didn't flip in this target vblank interval,
+ * but flip is pending in crtc. Based on the current
+ * scanout position we know that the current frame is
+ * (nearly) complete and the flip will (likely)
+ * complete before the start of the next frame.
+ */
+ update_pending = 0;
+ }
+ if (update_pending) {
/* crtc didn't flip in this target vblank interval,
* but flip is pending in crtc. It will complete it
* in next vblank interval, so complete the flip at
@@ -393,7 +402,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto pflip_cleanup;
}
- r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ /* Only 27 bit offset for legacy CRTC */
+ r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
r = -EINVAL;
@@ -1136,11 +1147,6 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = {
.output_poll_changed = radeon_output_poll_changed
};
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
-
static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{ { 0, "driver" },
{ 1, "bios" },
@@ -1165,86 +1171,53 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
- int i, sz;
+ int sz;
if (rdev->is_atom_bios) {
rdev->mode_info.coherent_mode_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_RANGE,
- "coherent", 2);
+ drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
if (!rdev->mode_info.coherent_mode_property)
return -ENOMEM;
-
- rdev->mode_info.coherent_mode_property->values[0] = 0;
- rdev->mode_info.coherent_mode_property->values[1] = 1;
}
if (!ASIC_IS_AVIVO(rdev)) {
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
rdev->mode_info.tmds_pll_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_ENUM,
- "tmds_pll", sz);
- for (i = 0; i < sz; i++) {
- drm_property_add_enum(rdev->mode_info.tmds_pll_property,
- i,
- radeon_tmds_pll_enum_list[i].type,
- radeon_tmds_pll_enum_list[i].name);
- }
+ drm_property_create_enum(rdev->ddev, 0,
+ "tmds_pll",
+ radeon_tmds_pll_enum_list, sz);
}
rdev->mode_info.load_detect_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_RANGE,
- "load detection", 2);
+ drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
- rdev->mode_info.load_detect_property->values[0] = 0;
- rdev->mode_info.load_detect_property->values[1] = 1;
drm_mode_create_scaling_mode_property(rdev->ddev);
sz = ARRAY_SIZE(radeon_tv_std_enum_list);
rdev->mode_info.tv_std_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_ENUM,
- "tv standard", sz);
- for (i = 0; i < sz; i++) {
- drm_property_add_enum(rdev->mode_info.tv_std_property,
- i,
- radeon_tv_std_enum_list[i].type,
- radeon_tv_std_enum_list[i].name);
- }
+ drm_property_create_enum(rdev->ddev, 0,
+ "tv standard",
+ radeon_tv_std_enum_list, sz);
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_ENUM,
- "underscan", sz);
- for (i = 0; i < sz; i++) {
- drm_property_add_enum(rdev->mode_info.underscan_property,
- i,
- radeon_underscan_enum_list[i].type,
- radeon_underscan_enum_list[i].name);
- }
+ drm_property_create_enum(rdev->ddev, 0,
+ "underscan",
+ radeon_underscan_enum_list, sz);
rdev->mode_info.underscan_hborder_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_RANGE,
- "underscan hborder", 2);
+ drm_property_create_range(rdev->ddev, 0,
+ "underscan hborder", 0, 128);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
- rdev->mode_info.underscan_hborder_property->values[0] = 0;
- rdev->mode_info.underscan_hborder_property->values[1] = 128;
rdev->mode_info.underscan_vborder_property =
- drm_property_create(rdev->ddev,
- DRM_MODE_PROP_RANGE,
- "underscan vborder", 2);
+ drm_property_create_range(rdev->ddev, 0,
+ "underscan vborder", 0, 128);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
- rdev->mode_info.underscan_vborder_property->values[0] = 0;
- rdev->mode_info.underscan_vborder_property->values[1] = 128;
return 0;
}
@@ -1290,6 +1263,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.max_height = 4096;
}
+ rdev->ddev->mode_config.preferred_depth = 24;
+ rdev->ddev->mode_config.prefer_shadow = 1;
+
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
ret = radeon_modeset_create_props(rdev);
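
Note: the property hunks above switch to the drm_property_create_range()/drm_property_create_enum() helpers, which collapse the old drm_property_create() call plus manual values[] assignments or drm_property_add_enum() loops into a single call. A sketch of the pattern (prop and dev stand in for the real variables):

	/* one call replaces create + a drm_property_add_enum() loop */
	prop = drm_property_create_enum(dev, 0, "tv standard",
					radeon_tv_std_enum_list,
					ARRAY_SIZE(radeon_tv_std_enum_list));
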
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8032f1f..498d21d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -54,10 +54,11 @@
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
* 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
- * 2.13.0 - virtual memory support
+ * 2.13.0 - virtual memory support, streamout
+ * 2.14.0 - add evergreen tiling information
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 13
+#define KMS_DRIVER_MINOR 14
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 195471c..5906914 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -164,7 +164,10 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
+ /* Only 27 bit offset for legacy CRTC */
+ ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
@@ -263,11 +266,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
if (info->screen_base == NULL) {
ret = -ENOSPC;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 7337850..c7008b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -75,32 +75,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
return 0;
}
-int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
- uint64_t *gpu_addr)
-{
- struct radeon_bo *robj = gem_to_radeon_bo(obj);
- int r;
-
- r = radeon_bo_reserve(robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(robj, pin_domain, gpu_addr);
- radeon_bo_unreserve(robj);
- return r;
-}
-
-void radeon_gem_object_unpin(struct drm_gem_object *obj)
-{
- struct radeon_bo *robj = gem_to_radeon_bo(obj);
- int r;
-
- r = radeon_bo_reserve(robj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
- }
-}
-
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 98a8ad6..85bcfc8 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,10 +26,15 @@
#include <linux/export.h>
#include "drmP.h"
+#include "drm_edid.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
+extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num);
+extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
+
/**
* radeon_ddc_probe
*
@@ -41,13 +46,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
int ret;
struct i2c_msg msgs[] = {
{
- .addr = 0x50,
+ .addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &out,
},
{
- .addr = 0x50,
+ .addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = 8,
.buf = buf,
@@ -882,6 +887,11 @@ static const struct i2c_algorithm radeon_i2c_algo = {
.functionality = radeon_hw_i2c_func,
};
+static const struct i2c_algorithm radeon_atom_i2c_algo = {
+ .master_xfer = radeon_atom_hw_i2c_xfer,
+ .functionality = radeon_atom_hw_i2c_func,
+};
+
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
@@ -914,6 +924,18 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
DRM_ERROR("Failed to register hw i2c %s\n", name);
goto out_free;
}
+ } else if (rec->hw_capable &&
+ radeon_hw_i2c &&
+ ASIC_IS_DCE3(rdev)) {
+ /* hw i2c using atom */
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c hw bus %s", name);
+ i2c->adapter.algo = &radeon_atom_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register hw i2c %s\n", name);
+ goto out_free;
+ }
} else {
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -925,10 +947,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->algo.bit.setscl = set_clock;
i2c->algo.bit.getsda = get_data;
i2c->algo.bit.getscl = get_clock;
- i2c->algo.bit.udelay = 20;
- /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
- * make this, 2 jiffies is a lot more reliable */
- i2c->algo.bit.timeout = 2;
+ i2c->algo.bit.udelay = 10;
+ i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
i2c->algo.bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
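
Note on the bit-banging parameters: halving udelay from 20 to 10 us roughly doubles the bit-banged DDC clock rate, and the timeout changes from a fixed 2 jiffies, whose real duration depends on HZ (about 2 ms at HZ=1000 but 20 ms at HZ=100), to usecs_to_jiffies(2200), i.e. consistently about the 2.2 ms figure the removed comment attributed to VESA.
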
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d335288..1986eba 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -57,6 +57,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
+ pci_set_master(dev->pdev);
+
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 25a19c4..210317c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -419,7 +419,9 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ /* Only 27 bit offset for legacy CRTC */
+ r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
+ &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d45df17..91541e6 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -224,7 +224,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
*bo = NULL;
}
-int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+ u64 *gpu_addr)
{
int r, i;
@@ -232,6 +233,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
bo->pin_count++;
if (gpu_addr)
*gpu_addr = radeon_bo_gpu_offset(bo);
+ WARN_ON_ONCE(max_offset != 0);
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
@@ -239,6 +241,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
/* force to pin into visible video ram */
bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
}
+ if (max_offset) {
+ u64 lpfn = max_offset >> PAGE_SHIFT;
+
+ if (!bo->placement.lpfn)
+ bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+
+ if (lpfn < bo->placement.lpfn)
+ bo->placement.lpfn = lpfn;
+ }
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
@@ -252,6 +263,11 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return r;
}
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+{
+ return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
+}
+
int radeon_bo_unpin(struct radeon_bo *bo)
{
int r, i;
@@ -445,8 +461,54 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
uint32_t tiling_flags, uint32_t pitch)
{
+ struct radeon_device *rdev = bo->rdev;
int r;
+ if (rdev->family >= CHIP_CEDAR) {
+ unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
+
+ bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+ bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+ mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+ tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+ stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
+ switch (bankw) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (bankh) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (mtaspect) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (tilesplit > 6) {
+ return -EINVAL;
+ }
+ if (stilesplit > 6) {
+ return -EINVAL;
+ }
+ }
r = radeon_bo_reserve(bo, false);
if (unlikely(r != 0))
return r;
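
Note: radeon_bo_pin_restricted() enforces max_offset by clamping the placement's last page frame number (lpfn), so the buffer is validated entirely below that offset, and radeon_bo_pin() becomes the unrestricted max_offset = 0 case. The 1 << 27 limit used for legacy CRTCs and cursors equals 128 MiB, matching the 27-bit offset fields mentioned in the comments above. A typical call, as used in the CRTC and cursor hunks:

	/* pin a legacy-CRTC scanout buffer below the 27-bit (128 MiB) boundary */
	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27, &base);
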
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index cde4303..f9104be 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -118,6 +118,8 @@ extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
+ u64 max_offset, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 095148e..3575129 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -221,7 +221,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
}
/* set memory clock */
- if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
@@ -863,11 +863,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
- if (rdev->asic->get_memory_clock)
+ if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
- if (rdev->asic->get_pcie_lanes)
+ if (rdev->asic->pm.get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index b4ce864..5098634 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -539,9 +539,11 @@
#define RADEON_CRTC2_PITCH 0x032c
#define RADEON_CRTC_STATUS 0x005c
+# define RADEON_CRTC_VBLANK_CUR (1 << 0)
# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC2_STATUS 0x03fc
+# define RADEON_CRTC2_VBLANK_CUR (1 << 0)
# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 92c9ea4..3056620 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -478,7 +478,9 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_ib *ib = node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
unsigned i;
if (ib == NULL) {
@@ -495,6 +497,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
#endif
int radeon_debugfs_ring_init(struct radeon_device *rdev)
@@ -517,10 +520,11 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
+ radeon_debugfs_ib_idx[i] = i;
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
+ radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
}
return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
RADEON_IB_POOL_SIZE);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c421e77..f493c64 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -226,7 +226,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
int r, i;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
+ r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
if (unlikely(r)) {
return r;
}
@@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
- if (!rdev->ring[rdev->copy_ring].ready) {
+ if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -266,7 +266,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (rdev->family >= CHIP_R600) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
/* no need to sync to our own or unused rings */
- if (i == rdev->copy_ring || !rdev->ring[i].ready)
+ if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
continue;
if (!fence->semaphore) {
@@ -283,12 +283,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
- r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+ r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
/* FIXME: handle ring lock error */
if (r)
continue;
- radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+ radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
}
}
@@ -410,7 +410,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
radeon_move_null(bo, new_mem);
return 0;
}
- if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
+ if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
+ rdev->asic->copy.copy == NULL) {
/* use memcpy */
goto memcpy;
}
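
Note: the radeon_copy_ring_index()/radeon_copy_dma_ring_index()/radeon_copy_blit_ring_index() helpers used above are not defined in this excerpt; presumably they are thin accessors over the new copy sub-struct introduced in radeon_asic.c, along these lines (an assumption, for illustration only):

	/* hypothetical accessors over the asic->copy ring indices */
	#define radeon_copy_ring_index(rdev)      ((rdev)->asic->copy.copy_ring_index)
	#define radeon_copy_dma_ring_index(rdev)  ((rdev)->asic->copy.dma_ring_index)
	#define radeon_copy_blit_ring_index(rdev) ((rdev)->asic->copy.blit_ring_index)

This replaces the removed rdev->copy_ring field, so the preferred copy ring becomes a per-ASIC property rather than device state.
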
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 2316977..aea63c4 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,8 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
@@ -77,7 +80,6 @@ cayman 0x9400
0x0002802C DB_DEPTH_CLEAR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
-0x0002805C DB_DEPTH_SLICE
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
@@ -206,7 +208,6 @@ cayman 0x9400
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
-0x00028350 SX_MISC
0x00028354 SX_SURFACE_SYNC
0x0002835C SX_SCATTER_EXPORT_SIZE
0x00028380 SQ_VTX_SEMANTIC_0
@@ -512,6 +513,13 @@ cayman 0x9400
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
@@ -551,6 +559,18 @@ cayman 0x9400
0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 161737a2..77c3720 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -4,6 +4,9 @@ evergreen 0x9400
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
@@ -93,7 +96,6 @@ evergreen 0x9400
0x0002802C DB_DEPTH_CLEAR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
-0x0002805C DB_DEPTH_SLICE
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
@@ -222,7 +224,6 @@ evergreen 0x9400
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
-0x00028350 SX_MISC
0x00028354 SX_SURFACE_SYNC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
@@ -522,6 +523,13 @@ evergreen 0x9400
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
@@ -554,6 +562,18 @@ evergreen 0x9400
0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
0x00028C3C PA_SC_AA_MASK
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 0380c5c..626c24e 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -3,6 +3,9 @@ r600 0x9400
0x00028230 R7xx_PA_SC_EDGERULE
0x000286C8 R7xx_SPI_THREAD_GROUPING
0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008490 CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
0x000088C4 VGT_CACHE_INVALIDATION
0x00028A50 VGT_ENHANCE
0x000088CC VGT_ES_PER_GS
@@ -38,6 +41,13 @@ r600 0x9400
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028810 PA_CL_CLIP_CNTL
0x00008A14 PA_CL_ENHANCE
0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
@@ -428,7 +438,7 @@ r600 0x9400
0x00028638 SPI_VS_OUT_ID_9
0x00028438 SX_ALPHA_REF
0x00028410 SX_ALPHA_TEST_CONTROL
-0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009604 TC_INVALIDATE
0x00009400 TD_FILTER4
@@ -743,14 +753,6 @@ r600 0x9400
0x00028114 CB_COLOR5_MASK
0x00028118 CB_COLOR6_MASK
0x0002811C CB_COLOR7_MASK
-0x00028080 CB_COLOR0_VIEW
-0x00028084 CB_COLOR1_VIEW
-0x00028088 CB_COLOR2_VIEW
-0x0002808C CB_COLOR3_VIEW
-0x00028090 CB_COLOR4_VIEW
-0x00028094 CB_COLOR5_VIEW
-0x00028098 CB_COLOR6_VIEW
-0x0002809C CB_COLOR7_VIEW
0x00028808 CB_COLOR_CONTROL
0x0002842C CB_FOG_BLUE
0x00028428 CB_FOG_GREEN
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 866a05b..4cf381b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,7 +430,7 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4fc7006..d25cf86 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,25 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ int i;
+
+ if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
+ break;
+ udelay(1);
+ }
+ }
+}
+
void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -175,7 +194,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
- rdev->asic->set_pcie_lanes &&
+ rdev->asic->pm.set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
@@ -864,7 +883,7 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
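
The avivo_wait_for_vblank() helper added above waits for a full vblank boundary in two bounded phases: first spin until the CRTC leaves any vblank already in progress, then spin until the next one begins. A hedged sketch of that pattern, where in_vblank() and udelay() stand in for the D1CRTC_STATUS register read and the kernel delay helper:

#include <stdbool.h>

extern bool in_vblank(void);        /* stand-in for the status register read */
extern void udelay(unsigned usec);  /* stand-in for the kernel busy-wait */

static void wait_for_next_vblank(unsigned usec_timeout)
{
	unsigned i;

	/* phase 1: leave the vblank we may already be in */
	for (i = 0; i < usec_timeout && in_vblank(); i++)
		udelay(1);

	/* phase 2: wait for the start of the next vblank */
	for (i = 0; i < usec_timeout && !in_vblank(); i++)
		udelay(1);
}
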
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f68dff2..f2c3b9d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -31,7 +31,7 @@
#include "atom.h"
#include "rs690d.h"
-static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
@@ -647,7 +647,7 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index c520d06..d8d78fe 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -53,9 +53,8 @@ void rv515_debugfs(struct radeon_device *rdev)
}
}
-void rv515_ring_start(struct radeon_device *rdev)
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
r = radeon_ring_lock(rdev, ring, 64);
@@ -413,7 +412,7 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r100_ib_test(rdev);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c049c0c..c62ae4b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1074,7 +1074,7 @@ static int rv770_startup(struct radeon_device *rdev)
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
+ rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
@@ -1114,7 +1114,7 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
dev_err(rdev->dev, "IB test failed (%d).\n", r);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 8a3e315..031aaaf 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1057,7 +1057,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
DRM_ERROR("indexed drawing command extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
/* fall through */
case SAVAGE_CMD_DMA_PRIM:
@@ -1076,7 +1077,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
cmdbuf->vb_stride,
cmdbuf->nbox, cmdbuf->box_addr);
if (ret != 0)
- return ret;
+ goto done;
first_draw_cmd = NULL;
}
}
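
The savage change above converts early returns on the error path into goto done so the function's shared cleanup still runs before the ioctl returns. The general shape of that single-exit pattern, sketched with placeholder helpers:

extern int do_step(void);   /* placeholder for one command-processing step */
extern void cleanup(void);  /* placeholder for the shared teardown */

static int run_commands(void)
{
	int ret;

	ret = do_step();
	if (ret)
		goto done;          /* was: return ret; which skipped cleanup */

	ret = do_step();
done:
	cleanup();                  /* now runs on success and failure alike */
	return ret;
}
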
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 573220c..30d98d1 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -41,6 +41,8 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_sis_private_t *dev_priv;
+ pci_set_master(dev->pdev);
+
dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 747c141..4a87282 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -29,6 +29,8 @@
* Keith Packard.
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
@@ -74,7 +76,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
ret = agp_bind_memory(mem, node->start);
if (ret)
- printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
+ pr_err("AGP Bind memory failed\n");
return ret;
}
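
The TTM files touched from here on all gain the same pr_fmt definition, which lets pr_err()/pr_warn()/pr_info() prepend the subsystem tag automatically instead of every call site repeating KERN_ERR TTM_PFX. A minimal illustration of how that expands:

/* pr_fmt must be defined before printk.h is pulled in (directly or via
 * other headers); the pr_* helpers wrap their format string with it. */
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/printk.h>

static void report_bind_failure(void)
{
	/* becomes printk(KERN_ERR "[TTM] AGP bind failed\n") */
	pr_err("AGP bind failed\n");
}
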
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c3a57d..1f5c67c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -28,6 +28,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
@@ -68,15 +70,13 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
- printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
- printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
- printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
- printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
- printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
- man->available_caching);
- printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
- man->default_caching);
+ pr_err(" has_type: %d\n", man->has_type);
+ pr_err(" use_type: %d\n", man->use_type);
+ pr_err(" flags: 0x%08X\n", man->flags);
+ pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
+ pr_err(" size: %llu\n", man->size);
+ pr_err(" available_caching: 0x%08X\n", man->available_caching);
+ pr_err(" default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
(*man->func->debug)(man, TTM_PFX);
}
@@ -86,16 +86,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
{
int i, ret, mem_type;
- printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ pr_err("No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return;
- printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i], mem_type);
+ pr_err(" placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
ttm_mem_type_debug(bo->bdev, mem_type);
}
}
@@ -344,7 +344,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
ret = -ENOMEM;
break;
default:
- printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
+ pr_err("Illegal buffer object type\n");
ret = -EINVAL;
break;
}
@@ -432,7 +432,7 @@ moved:
if (bo->evicted) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
if (ret)
- printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
+ pr_err("Can not flush read caches\n");
bo->evicted = false;
}
@@ -734,9 +734,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
- printk(KERN_ERR TTM_PFX
- "Failed to expire sync object before "
- "buffer eviction.\n");
+ pr_err("Failed to expire sync object before buffer eviction\n");
}
goto out;
}
@@ -757,9 +755,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
- printk(KERN_ERR TTM_PFX
- "Failed to find memory space for "
- "buffer 0x%p eviction.\n", bo);
+ pr_err("Failed to find memory space for buffer 0x%p eviction\n",
+ bo);
ttm_bo_mem_space_debug(bo, &placement);
}
goto out;
@@ -769,7 +766,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
- printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
goto out;
}
@@ -1180,7 +1177,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) {
- printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+ pr_err("Out of kernel memory\n");
if (destroy)
(*destroy)(bo);
else
@@ -1191,7 +1188,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
- printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+ pr_err("Illegal buffer object size\n");
if (destroy)
(*destroy)(bo);
else
@@ -1342,8 +1339,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
if (allow_errors) {
return ret;
} else {
- printk(KERN_ERR TTM_PFX
- "Cleanup eviction failed\n");
+ pr_err("Cleanup eviction failed\n");
}
}
spin_lock(&glob->lru_lock);
@@ -1358,14 +1354,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
int ret = -EINVAL;
if (mem_type >= TTM_NUM_MEM_TYPES) {
- printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
+ pr_err("Illegal memory type %d\n", mem_type);
return ret;
}
man = &bdev->man[mem_type];
if (!man->has_type) {
- printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
- "memory manager type %u\n", mem_type);
+ pr_err("Trying to take down uninitialized memory manager type %u\n",
+ mem_type);
return ret;
}
@@ -1388,16 +1384,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
- printk(KERN_ERR TTM_PFX
- "Illegal memory manager memory type %u.\n",
- mem_type);
+ pr_err("Illegal memory manager memory type %u\n", mem_type);
return -EINVAL;
}
if (!man->has_type) {
- printk(KERN_ERR TTM_PFX
- "Memory type %u has not been initialized.\n",
- mem_type);
+ pr_err("Memory type %u has not been initialized\n", mem_type);
return 0;
}
@@ -1482,8 +1474,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
if (unlikely(ret != 0)) {
- printk(KERN_ERR TTM_PFX
- "Could not register buffer object swapout.\n");
+ pr_err("Could not register buffer object swapout\n");
goto out_no_shrink;
}
@@ -1516,9 +1507,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
man->use_type = false;
if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
ret = -EBUSY;
- printk(KERN_ERR TTM_PFX
- "DRM memory manager type %d "
- "is not clean.\n", i);
+ pr_err("DRM memory manager type %d is not clean\n",
+ i);
}
man->has_type = false;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 5441284..a877813 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -28,6 +28,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
@@ -262,8 +264,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
- printk(KERN_ERR TTM_PFX
- "Could not find buffer object to map.\n");
+ pr_err("Could not find buffer object to map\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 9eba8e9..23d2ecb 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_page_alloc.h"
@@ -74,9 +76,8 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
- printk(KERN_INFO TTM_PFX
- "Zone %7s: Used memory at exit: %llu kiB.\n",
- zone->name, (unsigned long long) zone->used_mem >> 10);
+ pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
+ zone->name, (unsigned long long)zone->used_mem >> 10);
kfree(zone);
}
@@ -390,9 +391,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
#endif
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
- printk(KERN_INFO TTM_PFX
- "Zone %7s: Available graphics memory: %llu kiB.\n",
- zone->name, (unsigned long long) zone->max_mem >> 10);
+ pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
+ zone->name, (unsigned long long)zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 93577f2..68daca4 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -49,6 +49,8 @@
* for fast lookup of ref objects given a base object.
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#include <linux/list.h>
@@ -232,8 +234,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
return NULL;
if (tfile != base->tfile && !base->shareable) {
- printk(KERN_ERR TTM_PFX
- "Attempted access of non-shareable object.\n");
+ pr_err("Attempted access of non-shareable object\n");
ttm_base_object_unref(&base);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 499debd..ebc6fac 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -30,6 +30,9 @@
* - Use page->lru to keep a free list
* - doesn't track currently in use pages
*/
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
- printk(KERN_ERR TTM_PFX
- "Setting allocation size to %lu "
- "is not allowed. Recommended size is "
- "%lu\n",
+ pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
- printk(KERN_WARNING TTM_PFX
- "Setting allocation size to "
- "larger than %lu is not recommended.\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
- printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
- npages);
+ pr_err("Failed to set %d pages to wb!\n", npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
}
@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- printk(KERN_ERR TTM_PFX
- "Failed to allocate memory for pool free operation.\n");
+ pr_err("Failed to allocate memory for pool free operation\n");
return 0;
}
@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
- printk(KERN_ERR TTM_PFX
- "Failed to set %d pages to uc!\n",
- cpages);
+ pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
- printk(KERN_ERR TTM_PFX
- "Failed to set %d pages to wc!\n",
- cpages);
+ pr_err("Failed to set %d pages to wc!\n", cpages);
break;
default:
break;
@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- printk(KERN_ERR TTM_PFX
- "Unable to allocate table for new pages.");
+ pr_err("Unable to allocate table for new pages\n");
return -ENOMEM;
}
@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
- printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
+ pr_err("Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- printk(KERN_ERR TTM_PFX
- "Failed to fill pool (%p).", pool);
+ pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@@ -675,9 +665,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
- printk(KERN_ERR TTM_PFX
- "Erroneous page count. "
- "Leaking pages.\n");
+ pr_err("Erroneous page count. Leaking pages.\n");
__free_page(pages[i]);
pages[i] = NULL;
}
@@ -689,9 +677,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
- printk(KERN_ERR TTM_PFX
- "Erroneous page count. "
- "Leaking pages.\n");
+ pr_err("Erroneous page count. Leaking pages.\n");
list_add_tail(&pages[i]->lru, &pool->list);
pages[i] = NULL;
pool->npages++;
@@ -740,8 +726,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
p = alloc_page(gfp_flags);
if (!p) {
- printk(KERN_ERR TTM_PFX
- "Unable to allocate page.");
+ pr_err("Unable to allocate page\n");
return -ENOMEM;
}
@@ -781,9 +766,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
- printk(KERN_ERR TTM_PFX
- "Failed to allocate extra pages "
- "for large request.");
+ pr_err("Failed to allocate extra pages for large request\n");
ttm_put_pages(pages, count, flags, cstate);
return r;
}
@@ -809,7 +792,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager);
- printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
+ pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
@@ -844,7 +827,7 @@ void ttm_page_alloc_fini(void)
{
int i;
- printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
+ pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 0c46d8c..4f9e548 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,8 @@
* when freed).
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
@@ -221,18 +223,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
- printk(KERN_ERR TTM_PFX
- "Setting allocation size to %lu "
- "is not allowed. Recommended size is "
- "%lu\n",
+ pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
- printk(KERN_WARNING TTM_PFX
- "Setting allocation size to "
- "larger than %lu is not recommended.\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -313,15 +310,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
if (pool->type & IS_UC) {
r = set_pages_array_uc(pages, cpages);
if (r)
- pr_err(TTM_PFX
- "%s: Failed to set %d pages to uc!\n",
+ pr_err("%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
r = set_pages_array_wc(pages, cpages);
if (r)
- pr_err(TTM_PFX
- "%s: Failed to set %d pages to wc!\n",
+ pr_err("%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
}
return r;
@@ -387,8 +382,8 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
set_pages_array_wb(pages, npages))
- pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
- pool->dev_name, npages);
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, npages);
list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
list_del(&d_page->page_list);
@@ -400,8 +395,8 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
- pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
@@ -430,17 +425,16 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
#if 0
if (nr_free > 1) {
pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
- pool->dev_name, pool->name, current->pid,
- npages_to_free, nr_free);
+ pool->dev_name, pool->name, current->pid,
+ npages_to_free, nr_free);
}
#endif
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err(TTM_PFX
- "%s: Failed to allocate memory for pool free operation.\n",
- pool->dev_name);
+ pr_err("%s: Failed to allocate memory for pool free operation\n",
+ pool->dev_name);
return 0;
}
INIT_LIST_HEAD(&d_pages);
@@ -723,23 +717,21 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err(TTM_PFX
- "%s: Unable to allocate table for new pages.",
- pool->dev_name);
+ pr_err("%s: Unable to allocate table for new pages\n",
+ pool->dev_name);
return -ENOMEM;
}
if (count > 1) {
pr_debug("%s: (%s:%d) Getting %d pages\n",
- pool->dev_name, pool->name, current->pid,
- count);
+ pool->dev_name, pool->name, current->pid, count);
}
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) {
- pr_err(TTM_PFX "%s: Unable to get page %u.\n",
- pool->dev_name, i);
+ pr_err("%s: Unable to get page %u\n",
+ pool->dev_name, i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -821,8 +813,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
struct dma_page *d_page;
unsigned cpages = 0;
- pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
+ pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) {
cpages++;
@@ -1038,8 +1030,8 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
- p->pool->dev_name, p->pool->name, current->pid, nr_free,
- shrink_pages);
+ p->pool->dev_name, p->pool->name, current->pid,
+ nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
/* return estimated number of unused pages in pool */
@@ -1064,7 +1056,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager);
- printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+ pr_info("Initializing DMA pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
if (!_manager)
@@ -1097,7 +1089,7 @@ void ttm_dma_page_alloc_fini(void)
{
struct device_pools *p, *t;
- printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+ pr_info("Finalizing DMA pool allocator\n");
ttm_dma_pool_mm_shrink_fini(_manager);
list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index c10cf5e..fa09daf 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,6 +28,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
@@ -196,7 +198,7 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
ttm_tt_alloc_page_directory(ttm);
if (!ttm->pages) {
ttm_tt_destroy(ttm);
- printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+ pr_err("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
@@ -229,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
ttm_dma_tt_alloc_page_directory(ttm_dma);
if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
- printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+ pr_err("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
@@ -347,7 +349,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
ttm->num_pages << PAGE_SHIFT,
0);
if (unlikely(IS_ERR(swap_storage))) {
- printk(KERN_ERR "Failed allocating swap storage.\n");
+ pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
}
} else
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
new file mode 100644
index 0000000..0b5e096
--- /dev/null
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -0,0 +1,12 @@
+config DRM_UDL
+ tristate "DisplayLink"
+ depends on DRM && EXPERIMENTAL
+ select DRM_USB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_DEFERRED_IO
+ select DRM_KMS_HELPER
+ help
+	  This is a KMS driver for USB DisplayLink video adapters.
+ Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
new file mode 100644
index 0000000..05c7481
--- /dev/null
+++ b/drivers/gpu/drm/udl/Makefile
@@ -0,0 +1,6 @@
+
+ccflags-y := -Iinclude/drm
+
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+
+obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
new file mode 100644
index 0000000..ba055e9
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+#include "udl_drv.h"
+
+/* dummy connector used only to fetch the EDID;
+   all UDL devices appear to expose a DVI-D output */
+
+static u8 *udl_get_edid(struct udl_device *udl)
+{
+ u8 *block;
+ char rbuf[3];
+ int ret, i;
+
+ block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (block == NULL)
+ return NULL;
+
+ for (i = 0; i < EDID_LENGTH; i++) {
+ ret = usb_control_msg(udl->ddev->usbdev,
+ usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
+ (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
+ HZ);
+ if (ret < 1) {
+ DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
+ i--;
+ goto error;
+ }
+ block[i] = rbuf[1];
+ }
+
+ return block;
+
+error:
+ kfree(block);
+ return NULL;
+}
+
+static int udl_get_modes(struct drm_connector *connector)
+{
+ struct udl_device *udl = connector->dev->dev_private;
+ struct edid *edid;
+ int ret;
+
+ edid = (struct edid *)udl_get_edid(udl);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ return ret;
+}
+
+static int udl_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return 0;
+}
+
+static enum drm_connector_status
+udl_detect(struct drm_connector *connector, bool force)
+{
+ if (drm_device_is_unplugged(connector->dev))
+ return connector_status_disconnected;
+ return connector_status_connected;
+}
+
+struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+}
+
+int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void udl_connector_destroy(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+ .get_modes = udl_get_modes,
+ .mode_valid = udl_mode_valid,
+ .best_encoder = udl_best_single_encoder,
+};
+
+struct drm_connector_funcs udl_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = udl_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = udl_connector_destroy,
+ .set_property = udl_connector_set_property,
+};
+
+int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+
+ connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
+ drm_connector_helper_add(connector, &udl_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dirty_info_property,
+ 1);
+ return 0;
+}
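
udl_get_edid() above pulls the 128-byte EDID block one byte per vendor USB control transfer and then hands it to drm_add_edid_modes(). Stripped of the USB plumbing, the loop is a per-byte fetch that bails out on a short read; read_edid_byte() below is an assumed stand-in for the usb_control_msg() call:

#include <stdlib.h>

#define EDID_LENGTH 128

/* assumed helper wrapping the vendor control transfer; returns <0 on error */
extern int read_edid_byte(unsigned offset, unsigned char *out);

static unsigned char *fetch_edid(void)
{
	unsigned char *block = malloc(EDID_LENGTH);
	unsigned i;

	if (!block)
		return NULL;

	for (i = 0; i < EDID_LENGTH; i++) {
		if (read_edid_byte(i, &block[i]) < 0) {
			free(block);
			return NULL;
		}
	}
	return block;
}
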
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
new file mode 100644
index 0000000..5340c5f
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include "drm_usb.h"
+#include "drm_crtc_helper.h"
+#include "udl_drv.h"
+
+static struct drm_driver driver;
+
+static struct usb_device_id id_table[] = {
+ {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+ {},
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+MODULE_LICENSE("GPL");
+
+static int udl_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ return drm_get_usb_dev(interface, id, &driver);
+}
+
+static void udl_usb_disconnect(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+
+ drm_kms_helper_poll_disable(dev);
+ drm_connector_unplug_all(dev);
+ udl_fbdev_unplug(dev);
+ udl_drop_usb(dev);
+ drm_unplug_dev(dev);
+}
+
+static struct vm_operations_struct udl_gem_vm_ops = {
+ .fault = udl_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations udl_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .load = udl_driver_load,
+ .unload = udl_driver_unload,
+
+ /* gem hooks */
+ .gem_init_object = udl_gem_init_object,
+ .gem_free_object = udl_gem_free_object,
+ .gem_vm_ops = &udl_gem_vm_ops,
+
+ .dumb_create = udl_dumb_create,
+ .dumb_map_offset = udl_gem_mmap,
+ .dumb_destroy = udl_dumb_destroy,
+ .fops = &udl_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct usb_driver udl_driver = {
+ .name = "udl",
+ .probe = udl_usb_probe,
+ .disconnect = udl_usb_disconnect,
+ .id_table = id_table,
+};
+
+static int __init udl_init(void)
+{
+ return drm_usb_init(&driver, &udl_driver);
+}
+
+static void __exit udl_exit(void)
+{
+ drm_usb_exit(&driver, &udl_driver);
+}
+
+module_init(udl_init);
+module_exit(udl_exit);
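
udl_usb_disconnect() above does not free the device immediately; it stops polling, unlinks the fbdev, and calls drm_unplug_dev(), which marks the DRM device as gone so later opens fail while existing users drain out. A hedged, simplified sketch of that idea (names are illustrative):

#include <stdbool.h>

#define ENODEV_SKETCH 19

struct dev_sketch { bool unplugged; };

static void on_disconnect(struct dev_sketch *d)
{
	d->unplugged = true;   /* real code: drm_unplug_dev(dev) */
	/* final teardown is deferred until the last open handle closes */
}

static int on_open(struct dev_sketch *d)
{
	/* mirrors udl_fb_open() rejecting opens once the USB device is gone */
	return d->unplugged ? -ENODEV_SKETCH : 0;
}
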
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
new file mode 100644
index 0000000..1612954
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef UDL_DRV_H
+#define UDL_DRV_H
+
+#include <linux/usb.h>
+
+#define DRIVER_NAME "udl"
+#define DRIVER_DESC "DisplayLink"
+#define DRIVER_DATE "20120220"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 1
+
+struct udl_device;
+
+struct urb_node {
+ struct list_head entry;
+ struct udl_device *dev;
+ struct delayed_work release_urb_work;
+ struct urb *urb;
+};
+
+struct urb_list {
+ struct list_head list;
+ spinlock_t lock;
+ struct semaphore limit_sem;
+ int available;
+ int count;
+ size_t size;
+};
+
+struct udl_fbdev;
+
+struct udl_device {
+ struct device *dev;
+ struct drm_device *ddev;
+
+ int sku_pixel_limit;
+
+ struct urb_list urbs;
+ atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
+
+ struct udl_fbdev *fbdev;
+ char mode_buf[1024];
+ uint32_t mode_buf_len;
+ atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+ atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+ atomic_t bytes_sent; /* to usb, after compression including overhead */
+ atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+};
+
+struct udl_gem_object {
+ struct drm_gem_object base;
+ struct page **pages;
+ void *vmapping;
+};
+
+#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
+
+struct udl_framebuffer {
+ struct drm_framebuffer base;
+ struct udl_gem_object *obj;
+ bool active_16; /* active on the 16-bit channel */
+};
+
+#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
+
+/* modeset */
+int udl_modeset_init(struct drm_device *dev);
+void udl_modeset_cleanup(struct drm_device *dev);
+int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
+
+struct drm_encoder *udl_encoder_init(struct drm_device *dev);
+
+struct urb *udl_get_urb(struct drm_device *dev);
+
+int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
+void udl_urb_completion(struct urb *urb);
+
+int udl_driver_load(struct drm_device *dev, unsigned long flags);
+int udl_driver_unload(struct drm_device *dev);
+
+int udl_fbdev_init(struct drm_device *dev);
+void udl_fbdev_cleanup(struct drm_device *dev);
+void udl_fbdev_unplug(struct drm_device *dev);
+struct drm_framebuffer *
+udl_fb_user_fb_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+ const char *front, char **urb_buf_ptr,
+ u32 byte_offset, u32 byte_width,
+ int *ident_ptr, int *sent_ptr);
+
+int udl_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
+
+int udl_gem_init_object(struct drm_gem_object *obj);
+void udl_gem_free_object(struct drm_gem_object *gem_obj);
+struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
+ size_t size);
+
+int udl_gem_vmap(struct udl_gem_object *obj);
+void udl_gem_vunmap(struct udl_gem_object *obj);
+int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ int width, int height);
+
+int udl_drop_usb(struct drm_device *dev);
+
+#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
+#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
+#define CMD_WRITE_COPY8 "\xAF\x62" /**< 8 bit copy command. */
+#define CMD_WRITE_RLX8 "\xAF\x63" /**< 8 bit extended run length command. */
+
+#define CMD_WRITE_RAW16 "\xAF\x68" /**< 16 bit raw write command. */
+#define CMD_WRITE_RL16 "\xAF\x69" /**< 16 bit run length command. */
+#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
+#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
+
+#endif
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
new file mode 100644
index 0000000..56e75f0
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "udl_drv.h"
+
+/* dummy encoder */
+void udl_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static void udl_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static bool udl_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void udl_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void udl_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void udl_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void
+udl_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static const struct drm_encoder_helper_funcs udl_helper_funcs = {
+ .dpms = udl_encoder_dpms,
+ .mode_fixup = udl_mode_fixup,
+ .prepare = udl_encoder_prepare,
+ .mode_set = udl_encoder_mode_set,
+ .commit = udl_encoder_commit,
+ .disable = udl_encoder_disable,
+};
+
+static const struct drm_encoder_funcs udl_enc_funcs = {
+ .destroy = udl_enc_destroy,
+};
+
+struct drm_encoder *udl_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &udl_helper_funcs);
+ encoder->possible_crtcs = 1;
+ return encoder;
+}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
new file mode 100644
index 0000000..4d9c3a5
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "udl_drv.h"
+
+#include "drm_fb_helper.h"
+
+#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+
+static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
+static int fb_bpp = 16;
+
+module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+
+struct udl_fbdev {
+ struct drm_fb_helper helper;
+ struct udl_framebuffer ufb;
+ struct list_head fbdev_list;
+ int fb_count;
+};
+
+#define DL_ALIGN_UP(x, a) ALIGN(x, a)
+#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
+
+/** Read the red component (0..255) of a 32 bpp colour. */
+#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
+
+/** Read the green component (0..255) of a 32 bpp colour. */
+#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
+
+/** Read the blue component (0..255) of a 32 bpp colour. */
+#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
+
+/** Return red/green component of a 16 bpp colour number. */
+#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
+
+/** Return green/blue component of a 16 bpp colour number. */
+#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
+
+/** Return 8 bpp colour number from red, green and blue components. */
+#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
+
+#if 0
+static uint8_t rgb8(uint32_t col)
+{
+ uint8_t red = DLO_RGB_GETRED(col);
+ uint8_t grn = DLO_RGB_GETGRN(col);
+ uint8_t blu = DLO_RGB_GETBLU(col);
+
+ return DLO_RGB8(red, grn, blu);
+}
+
+static uint16_t rgb16(uint32_t col)
+{
+ uint8_t red = DLO_RGB_GETRED(col);
+ uint8_t grn = DLO_RGB_GETGRN(col);
+ uint8_t blu = DLO_RGB_GETBLU(col);
+
+ return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
+}
+#endif
+
+/*
+ * NOTE: fb_defio.c holds info->fbdefio.mutex while calling us.
+ * Touching ANY framebuffer memory that triggers a page fault
+ * in fb_defio will deadlock, because fb_defio then tries to
+ * grab the same mutex.
+ */
+static void udlfb_dpy_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct page *cur;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct udl_fbdev *ufbdev = info->par;
+ struct drm_device *dev = ufbdev->ufb.base.dev;
+ struct udl_device *udl = dev->dev_private;
+ struct urb *urb;
+ char *cmd;
+ cycles_t start_cycles, end_cycles;
+ int bytes_sent = 0;
+ int bytes_identical = 0;
+ int bytes_rendered = 0;
+
+ if (!fb_defio)
+ return;
+
+ start_cycles = get_cycles();
+
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return;
+
+ cmd = urb->transfer_buffer;
+
+ /* walk the written page list and render each to device */
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+
+ if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
+ &urb, (char *) info->fix.smem_start,
+ &cmd, cur->index << PAGE_SHIFT,
+ PAGE_SIZE, &bytes_identical, &bytes_sent))
+ goto error;
+ bytes_rendered += PAGE_SIZE;
+ }
+
+ if (cmd > (char *) urb->transfer_buffer) {
+ /* Send partial buffer remaining before exiting */
+ int len = cmd - (char *) urb->transfer_buffer;
+ udl_submit_urb(dev, urb, len);
+ bytes_sent += len;
+ } else
+ udl_urb_completion(urb);
+
+error:
+ atomic_add(bytes_sent, &udl->bytes_sent);
+ atomic_add(bytes_identical, &udl->bytes_identical);
+ atomic_add(bytes_rendered, &udl->bytes_rendered);
+ end_cycles = get_cycles();
+ atomic_add(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &udl->cpu_kcycles_used);
+}
+
+int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ int width, int height)
+{
+ struct drm_device *dev = fb->base.dev;
+ struct udl_device *udl = dev->dev_private;
+ int i, ret;
+ char *cmd;
+ cycles_t start_cycles, end_cycles;
+ int bytes_sent = 0;
+ int bytes_identical = 0;
+ struct urb *urb;
+ int aligned_x;
+ int bpp = (fb->base.bits_per_pixel / 8);
+
+ if (!fb->active_16)
+ return 0;
+
+ if (!fb->obj->vmapping)
+ udl_gem_vmap(fb->obj);
+
+ start_cycles = get_cycles();
+
+ aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+ width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+ x = aligned_x;
+
+ if ((width <= 0) ||
+ (x + width > fb->base.width) ||
+ (y + height > fb->base.height))
+ return -EINVAL;
+
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return 0;
+ cmd = urb->transfer_buffer;
+
+ for (i = y; i < y + height ; i++) {
+ const int line_offset = fb->base.pitches[0] * i;
+ const int byte_offset = line_offset + (x * bpp);
+
+ if (udl_render_hline(dev, bpp, &urb,
+ (char *) fb->obj->vmapping,
+ &cmd, byte_offset, width * bpp,
+ &bytes_identical, &bytes_sent))
+ goto error;
+ }
+
+ if (cmd > (char *) urb->transfer_buffer) {
+ /* Send partial buffer remaining before exiting */
+ int len = cmd - (char *) urb->transfer_buffer;
+ ret = udl_submit_urb(dev, urb, len);
+ bytes_sent += len;
+ } else
+ udl_urb_completion(urb);
+
+error:
+ atomic_add(bytes_sent, &udl->bytes_sent);
+ atomic_add(bytes_identical, &udl->bytes_identical);
+ atomic_add(width*height*bpp, &udl->bytes_rendered);
+ end_cycles = get_cycles();
+ atomic_add(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &udl->cpu_kcycles_used);
+
+ return 0;
+}
+
+static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ unsigned long start = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long page, pos;
+
+ if (offset + size > info->fix.smem_len)
+ return -EINVAL;
+
+ pos = (unsigned long)info->fix.smem_start + offset;
+
+ pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+ pos, size);
+
+ while (size > 0) {
+ page = vmalloc_to_pfn((void *)pos);
+ if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ if (size > PAGE_SIZE)
+ size -= PAGE_SIZE;
+ else
+ size = 0;
+ }
+
+	vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
+ return 0;
+}
+
+static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct udl_fbdev *ufbdev = info->par;
+
+ sys_fillrect(info, rect);
+
+ udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct udl_fbdev *ufbdev = info->par;
+
+ sys_copyarea(info, region);
+
+ udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
+ region->height);
+}
+
+static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct udl_fbdev *ufbdev = info->par;
+
+ sys_imageblit(info, image);
+
+ udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
+ image->height);
+}
+
+/*
+ * It's common for several clients to have the framebuffer open
+ * simultaneously, e.g. both fbcon and X, which makes things interesting.
+ * Assumes the caller is holding info->lock (for open and release at least).
+ */
+static int udl_fb_open(struct fb_info *info, int user)
+{
+ struct udl_fbdev *ufbdev = info->par;
+ struct drm_device *dev = ufbdev->ufb.base.dev;
+ struct udl_device *udl = dev->dev_private;
+
+ /* If the USB device is gone, we don't accept new opens */
+ if (drm_device_is_unplugged(udl->ddev))
+ return -ENODEV;
+
+ ufbdev->fb_count++;
+
+ if (fb_defio && (info->fbdefio == NULL)) {
+ /* enable defio at last moment if not disabled by client */
+
+ struct fb_deferred_io *fbdefio;
+
+ fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+
+ if (fbdefio) {
+ fbdefio->delay = DL_DEFIO_WRITE_DELAY;
+ fbdefio->deferred_io = udlfb_dpy_deferred_io;
+ }
+
+ info->fbdefio = fbdefio;
+ fb_deferred_io_init(info);
+ }
+
+ pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+ info->node, user, info, ufbdev->fb_count);
+
+ return 0;
+}
+
+
+/*
+ * Assumes caller is holding info->lock mutex (for open and release at least)
+ */
+static int udl_fb_release(struct fb_info *info, int user)
+{
+ struct udl_fbdev *ufbdev = info->par;
+
+ ufbdev->fb_count--;
+
+ if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+ info->fbdefio = NULL;
+ info->fbops->fb_mmap = udl_fb_mmap;
+ }
+
+ pr_warn("released /dev/fb%d user=%d count=%d\n",
+ info->node, user, ufbdev->fb_count);
+
+ return 0;
+}
+
+static struct fb_ops udlfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = udl_fb_fillrect,
+ .fb_copyarea = udl_fb_copyarea,
+ .fb_imageblit = udl_fb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+ .fb_mmap = udl_fb_mmap,
+ .fb_open = udl_fb_open,
+ .fb_release = udl_fb_release,
+};
+
+void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ *red = 0;
+ *green = 0;
+ *blue = 0;
+}
+
+static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct udl_framebuffer *ufb = to_udl_fb(fb);
+ int i;
+
+ if (!ufb->active_16)
+ return 0;
+
+ for (i = 0; i < num_clips; i++) {
+ udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
+ clips[i].x2 - clips[i].x1,
+ clips[i].y2 - clips[i].y1);
+ }
+ return 0;
+}
+
+static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct udl_framebuffer *ufb = to_udl_fb(fb);
+
+ if (ufb->obj)
+ drm_gem_object_unreference_unlocked(&ufb->obj->base);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(ufb);
+}
+
+static const struct drm_framebuffer_funcs udlfb_funcs = {
+ .destroy = udl_user_framebuffer_destroy,
+ .dirty = udl_user_framebuffer_dirty,
+ .create_handle = NULL,
+};
+
+
+static int
+udl_framebuffer_init(struct drm_device *dev,
+ struct udl_framebuffer *ufb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct udl_gem_object *obj)
+{
+ int ret;
+
+ ufb->obj = obj;
+ ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
+ drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+ return ret;
+}
+
+
+static int udlfb_create(struct udl_fbdev *ufbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = ufbdev->helper.dev;
+ struct fb_info *info;
+ struct device *device = &dev->usbdev->dev;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct udl_gem_object *obj;
+ uint32_t size;
+ int ret = 0;
+
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ obj = udl_gem_alloc_object(dev, size);
+ if (!obj)
+ goto out;
+
+ ret = udl_gem_vmap(obj);
+ if (ret) {
+ DRM_ERROR("failed to vmap fb\n");
+ goto out_gfree;
+ }
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_gfree;
+ }
+ info->par = ufbdev;
+
+ ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
+ if (ret)
+ goto out_gfree;
+
+ fb = &ufbdev->ufb.base;
+
+ ufbdev->helper.fb = fb;
+ ufbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "udldrmfb");
+
+ info->screen_base = ufbdev->ufb.obj->vmapping;
+ info->fix.smem_len = size;
+ info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &udlfb_ops;
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_gfree;
+ }
+
+ DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
+ fb->width, fb->height,
+ ufbdev->ufb.obj->vmapping);
+
+ return ret;
+out_gfree:
+ drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+out:
+ return ret;
+}
+
+static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = udlfb_create(ufbdev, sizes);
+ if (ret)
+ return ret;
+
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
+ .gamma_set = udl_crtc_fb_gamma_set,
+ .gamma_get = udl_crtc_fb_gamma_get,
+ .fb_probe = udl_fb_find_or_create_single,
+};
+
+static void udl_fbdev_destroy(struct drm_device *dev,
+ struct udl_fbdev *ufbdev)
+{
+ struct fb_info *info;
+ if (ufbdev->helper.fbdev) {
+ info = ufbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ drm_fb_helper_fini(&ufbdev->helper);
+ drm_framebuffer_cleanup(&ufbdev->ufb.base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+}
+
+int udl_fbdev_init(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+ int bpp_sel = fb_bpp;
+ struct udl_fbdev *ufbdev;
+ int ret;
+
+ ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
+ if (!ufbdev)
+ return -ENOMEM;
+
+ udl->fbdev = ufbdev;
+ ufbdev->helper.funcs = &udl_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &ufbdev->helper,
+ 1, 1);
+ if (ret) {
+ kfree(ufbdev);
+ return ret;
+
+ }
+
+ drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+ drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
+ return 0;
+}
+
+void udl_fbdev_cleanup(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+ if (!udl->fbdev)
+ return;
+
+ udl_fbdev_destroy(dev, udl->fbdev);
+ kfree(udl->fbdev);
+ udl->fbdev = NULL;
+}
+
+void udl_fbdev_unplug(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+ struct udl_fbdev *ufbdev;
+ if (!udl->fbdev)
+ return;
+
+ ufbdev = udl->fbdev;
+ if (ufbdev->helper.fbdev) {
+ struct fb_info *info;
+ info = ufbdev->helper.fbdev;
+ unlink_framebuffer(info);
+ }
+}
+
+struct drm_framebuffer *
+udl_fb_user_fb_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct udl_framebuffer *ufb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
+ if (ufb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
+ if (ret) {
+ kfree(ufb);
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+ }
+ return &ufb->base;
+}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
new file mode 100644
index 0000000..852642d
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include "drmP.h"
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+
+struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
+ size_t size)
+{
+ struct udl_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
+
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
+}
+
+static int
+udl_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
+{
+ struct udl_gem_object *obj;
+ int ret;
+ u32 handle;
+
+ size = roundup(size, PAGE_SIZE);
+
+ obj = udl_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ if (ret) {
+ drm_gem_object_release(&obj->base);
+ kfree(obj);
+ return ret;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+ *handle_p = handle;
+ return 0;
+}
+
+int udl_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+ return udl_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
+int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
+ struct page *page;
+ unsigned int page_offset;
+ int ret = 0;
+
+ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ if (!obj->pages)
+ return VM_FAULT_SIGBUS;
+
+ page = obj->pages[page_offset];
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+ switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
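+ /* fall through: treat as NOPAGE so the fault is retried */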
+ case 0:
+ case -ERESTARTSYS:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+int udl_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+
+ return 0;
+}
+
+static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+{
+ int page_count, i;
+ struct page *page;
+ struct inode *inode;
+ struct address_space *mapping;
+
+ if (obj->pages)
+ return 0;
+
+ page_count = obj->base.size / PAGE_SIZE;
+ BUG_ON(obj->pages != NULL);
+ obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+ if (obj->pages == NULL)
+ return -ENOMEM;
+
+ inode = obj->base.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < page_count; i++) {
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
+ obj->pages[i] = page;
+ }
+
+ return 0;
+err_pages:
+ while (i--)
+ page_cache_release(obj->pages[i]);
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return PTR_ERR(page);
+}
+
+static void udl_gem_put_pages(struct udl_gem_object *obj)
+{
+ int page_count = obj->base.size / PAGE_SIZE;
+ int i;
+
+ for (i = 0; i < page_count; i++)
+ page_cache_release(obj->pages[i]);
+
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+}
+
+int udl_gem_vmap(struct udl_gem_object *obj)
+{
+ int page_count = obj->base.size / PAGE_SIZE;
+ int ret;
+
+ ret = udl_gem_get_pages(obj, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
+ if (!obj->vmapping)
+ return -ENOMEM;
+ return 0;
+}
+
+void udl_gem_vunmap(struct udl_gem_object *obj)
+{
+ if (obj->vmapping)
+ vunmap(obj->vmapping);
+
+ udl_gem_put_pages(obj);
+}
+
+void udl_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct udl_gem_object *obj = to_udl_bo(gem_obj);
+
+ if (obj->vmapping)
+ udl_gem_vunmap(obj);
+
+ if (obj->pages)
+ udl_gem_put_pages(obj);
+
+ if (gem_obj->map_list.map)
+ drm_gem_free_mmap_offset(gem_obj);
+}
+
+/* The dumb interface doesn't work with the straight GEM mmap
+ interface; it expects mmap to be done on the DRM fd, as usual. */
+int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct udl_gem_object *gobj;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ gobj = to_udl_bo(obj);
+
+ ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+ if (ret)
+ goto out;
+ if (!gobj->base.map_list.map) {
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ }
+
+ *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+
+out:
+ drm_gem_object_unreference(&gobj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
new file mode 100644
index 0000000..a8d5f09
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#include "drmP.h"
+#include "udl_drv.h"
+
+/* BULK_SIZE as per usb-skeleton. Can we get a full page and avoid the overhead? */
+#define BULK_SIZE 512
+
+#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
+#define WRITES_IN_FLIGHT (4)
+#define MAX_VENDOR_DESCRIPTOR_SIZE 256
+
+#define GET_URB_TIMEOUT HZ
+#define FREE_URB_TIMEOUT (HZ*2)
+
+static int udl_parse_vendor_descriptor(struct drm_device *dev,
+ struct usb_device *usbdev)
+{
+ struct udl_device *udl = dev->dev_private;
+ char *desc;
+ char *buf;
+ char *desc_end;
+
+ u8 total_len = 0;
+
+ buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
+ if (!buf)
+ return false;
+ desc = buf;
+
+ total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
+ 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
+ if (total_len > 5) {
+ DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
+ "%02x %02x %02x %02x %02x %02x %02x\n",
+ total_len, desc[0],
+ desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
+ desc[7], desc[8], desc[9], desc[10]);
+
+ if ((desc[0] != total_len) || /* descriptor length */
+ (desc[1] != 0x5f) || /* vendor descriptor type */
+ (desc[2] != 0x01) || /* version (2 bytes) */
+ (desc[3] != 0x00) ||
+ (desc[4] != total_len - 2)) /* length after type */
+ goto unrecognized;
+
+ desc_end = desc + total_len;
+ desc += 5; /* the fixed header we've already parsed */
+
+ while (desc < desc_end) {
+ u8 length;
+ u16 key;
+
+ key = *((u16 *) desc);
+ desc += sizeof(u16);
+ length = *desc;
+ desc++;
+
+ switch (key) {
+ case 0x0200: { /* max_area */
+ u32 max_area;
+ max_area = le32_to_cpu(*((u32 *)desc));
+ DRM_DEBUG("DL chip limited to %d pixel modes\n",
+ max_area);
+ udl->sku_pixel_limit = max_area;
+ break;
+ }
+ default:
+ break;
+ }
+ desc += length;
+ }
+ }
+
+ goto success;
+
+unrecognized:
+ /* allow the driver to load for now even if the firmware is unrecognized */
+ DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+
+success:
+ kfree(buf);
+ return true;
+}
+
+static void udl_release_urb_work(struct work_struct *work)
+{
+ struct urb_node *unode = container_of(work, struct urb_node,
+ release_urb_work.work);
+
+ up(&unode->dev->urbs.limit_sem);
+}
+
+void udl_urb_completion(struct urb *urb)
+{
+ struct urb_node *unode = urb->context;
+ struct udl_device *udl = unode->dev;
+ unsigned long flags;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)) {
+ DRM_ERROR("%s - nonzero write bulk status received: %d\n",
+ __func__, urb->status);
+ atomic_set(&udl->lost_pixels, 1);
+ }
+ }
+
+ urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
+
+ spin_lock_irqsave(&udl->urbs.lock, flags);
+ list_add_tail(&unode->entry, &udl->urbs.list);
+ udl->urbs.available++;
+ spin_unlock_irqrestore(&udl->urbs.lock, flags);
+
+#if 0
+ /*
+ * When using fb_defio, we deadlock if up() is called
+ * while another is waiting. So queue to another process.
+ */
+ if (fb_defio)
+ schedule_delayed_work(&unode->release_urb_work, 0);
+ else
+#endif
+ up(&udl->urbs.limit_sem);
+}
+
+static void udl_free_urb_list(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+ int count = udl->urbs.count;
+ struct list_head *node;
+ struct urb_node *unode;
+ struct urb *urb;
+ int ret;
+ unsigned long flags;
+
+ DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
+
+ /* keep waiting and freeing, until we've got 'em all */
+ while (count--) {
+
+ /* Getting interrupted means a leak, but ok at shutdown*/
+ ret = down_interruptible(&udl->urbs.limit_sem);
+ if (ret)
+ break;
+
+ spin_lock_irqsave(&udl->urbs.lock, flags);
+
+ node = udl->urbs.list.next; /* have reserved one with sem */
+ list_del_init(node);
+
+ spin_unlock_irqrestore(&udl->urbs.lock, flags);
+
+ unode = list_entry(node, struct urb_node, entry);
+ urb = unode->urb;
+
+ /* Free each separately allocated piece */
+ usb_free_coherent(urb->dev, udl->urbs.size,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ kfree(node);
+ }
+ udl->urbs.count = 0;
+}
+
+static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+{
+ struct udl_device *udl = dev->dev_private;
+ int i = 0;
+ struct urb *urb;
+ struct urb_node *unode;
+ char *buf;
+
+ spin_lock_init(&udl->urbs.lock);
+
+ udl->urbs.size = size;
+ INIT_LIST_HEAD(&udl->urbs.list);
+
+ while (i < count) {
+ unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
+ if (!unode)
+ break;
+ unode->dev = udl;
+
+ INIT_DELAYED_WORK(&unode->release_urb_work,
+ udl_release_urb_work);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(unode);
+ break;
+ }
+ unode->urb = urb;
+
+ buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ kfree(unode);
+ usb_free_urb(urb);
+ break;
+ }
+
+ /* urb->transfer_buffer_length set to actual before submit */
+ usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1),
+ buf, size, udl_urb_completion, unode);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ list_add_tail(&unode->entry, &udl->urbs.list);
+
+ i++;
+ }
+
+ sema_init(&udl->urbs.limit_sem, i);
+ udl->urbs.count = i;
+ udl->urbs.available = i;
+
+ DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+
+ return i;
+}
+
+struct urb *udl_get_urb(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+ int ret = 0;
+ struct list_head *entry;
+ struct urb_node *unode;
+ struct urb *urb = NULL;
+ unsigned long flags;
+
+ /* Wait for an in-flight buffer to complete and get re-queued */
+ ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
+ if (ret) {
+ atomic_set(&udl->lost_pixels, 1);
+ DRM_INFO("wait for urb interrupted: %x available: %d\n",
+ ret, udl->urbs.available);
+ goto error;
+ }
+
+ spin_lock_irqsave(&udl->urbs.lock, flags);
+
+ BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
+ entry = udl->urbs.list.next;
+ list_del_init(entry);
+ udl->urbs.available--;
+
+ spin_unlock_irqrestore(&udl->urbs.lock, flags);
+
+ unode = list_entry(entry, struct urb_node, entry);
+ urb = unode->urb;
+
+error:
+ return urb;
+}
+
+int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+{
+ struct udl_device *udl = dev->dev_private;
+ int ret;
+
+ BUG_ON(len > udl->urbs.size);
+
+ urb->transfer_buffer_length = len; /* set to actual payload len */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ udl_urb_completion(urb); /* because no one else will */
+ atomic_set(&udl->lost_pixels, 1);
+ DRM_ERROR("usb_submit_urb error %x\n", ret);
+ }
+ return ret;
+}
+
+int udl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct udl_device *udl;
+ int ret;
+
+ DRM_DEBUG("\n");
+ udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
+ if (!udl)
+ return -ENOMEM;
+
+ udl->ddev = dev;
+ dev->dev_private = udl;
+
+ if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
+ ret = -ENODEV;
+ DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+ goto err;
+ }
+
+ if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ ret = -ENOMEM;
+ DRM_ERROR("udl_alloc_urb_list failed\n");
+ goto err;
+ }
+
+ DRM_DEBUG("\n");
+ ret = udl_modeset_init(dev);
+ if (ret)
+ goto err;
+
+ ret = udl_fbdev_init(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ if (udl->urbs.count)
+ udl_free_urb_list(dev);
+ kfree(udl);
+ DRM_ERROR("%d\n", ret);
+ return ret;
+}
+
+int udl_drop_usb(struct drm_device *dev)
+{
+ udl_free_urb_list(dev);
+ return 0;
+}
+
+int udl_driver_unload(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+
+ if (udl->urbs.count)
+ udl_free_urb_list(dev);
+
+ udl_fbdev_cleanup(dev);
+ udl_modeset_cleanup(dev);
+ kfree(udl);
+ return 0;
+}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
new file mode 100644
index 0000000..b3ecb3d
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "udl_drv.h"
+
+/*
+ * All DisplayLink bulk operations start with 0xAF, followed by a specific code.
+ * All operations are written to buffers which are later sent to the device.
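+ * A register write, for example, is the four-byte sequence
+ * 0xAF 0x20 <register> <value>, which is what udl_set_register() emits.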
+ */
+static char *udl_set_register(char *buf, u8 reg, u8 val)
+{
+ *buf++ = 0xAF;
+ *buf++ = 0x20;
+ *buf++ = reg;
+ *buf++ = val;
+ return buf;
+}
+
+static char *udl_vidreg_lock(char *buf)
+{
+ return udl_set_register(buf, 0xFF, 0x00);
+}
+
+static char *udl_vidreg_unlock(char *buf)
+{
+ return udl_set_register(buf, 0xFF, 0xFF);
+}
+
+/*
+ * On/Off for driving the DisplayLink framebuffer to the display
+ * 0x00 H and V sync on
+ * 0x01 H and V sync off (screen blank but powered)
+ * 0x07 DPMS powerdown (requires modeset to come back)
+ */
+static char *udl_enable_hvsync(char *buf, bool enable)
+{
+ if (enable)
+ return udl_set_register(buf, 0x1F, 0x00);
+ else
+ return udl_set_register(buf, 0x1F, 0x07);
+}
+
+static char *udl_set_color_depth(char *buf, u8 selection)
+{
+ return udl_set_register(buf, 0x00, selection);
+}
+
+static char *udl_set_base16bpp(char *wrptr, u32 base)
+{
+ /* the 16bpp base pointer is 24 bits wide; 0x20 holds the high byte. */
+ wrptr = udl_set_register(wrptr, 0x20, base >> 16);
+ wrptr = udl_set_register(wrptr, 0x21, base >> 8);
+ return udl_set_register(wrptr, 0x22, base);
+}
+
+/*
+ * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
+ * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer.
+ */
+static char *udl_set_base8bpp(char *wrptr, u32 base)
+{
+ wrptr = udl_set_register(wrptr, 0x26, base >> 16);
+ wrptr = udl_set_register(wrptr, 0x27, base >> 8);
+ return udl_set_register(wrptr, 0x28, base);
+}
+
+static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
+{
+ wrptr = udl_set_register(wrptr, reg, value >> 8);
+ return udl_set_register(wrptr, reg+1, value);
+}
+
+/*
+ * This is kind of weird because the controller takes some
+ * register values in a different byte order than other registers.
+ */
+static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value)
+{
+ wrptr = udl_set_register(wrptr, reg, value);
+ return udl_set_register(wrptr, reg+1, value >> 8);
+}
+
+/*
+ * LFSR is linear feedback shift register. The reason we have this is
+ * because the display controller needs to minimize the clock depth of
+ * various counters used in the display path. So this code reverses the
+ * provided value into the lfsr16 value by counting backwards to get
+ * the value that needs to be set in the hardware comparator to get the
+ * same actual count. This makes sense once you read above a couple of
+ * times and think about it from a hardware perspective.
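+ *
+ * For example, starting from the all-ones seed a single iteration shifts
+ * in a zero feedback bit, so udl_lfsr16(1) returns 0xFFFE and
+ * udl_lfsr16(2) returns 0xFFFC.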
+ */
+static u16 udl_lfsr16(u16 actual_count)
+{
+ u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
+
+ while (actual_count--) {
+ lv = ((lv << 1) |
+ (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
+ & 0xFFFF;
+ }
+
+ return (u16) lv;
+}
+
+/*
+ * This does LFSR conversion on the value that is to be written.
+ * See LFSR explanation above for more detail.
+ */
+static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
+{
+ return udl_set_register_16(wrptr, reg, udl_lfsr16(value));
+}
+
+/*
+ * This takes a drm_display_mode and converts its timing details into the
+ * DisplayLink equivalent register commands.
+ *
+ * For reference, the corresponding register-write sequence in udlfb was:
+ ERR(vreg(dev, 0x00, (color_depth == 16) ? 0 : 1));
+ ERR(vreg_lfsr16(dev, 0x01, xDisplayStart));
+ ERR(vreg_lfsr16(dev, 0x03, xDisplayEnd));
+ ERR(vreg_lfsr16(dev, 0x05, yDisplayStart));
+ ERR(vreg_lfsr16(dev, 0x07, yDisplayEnd));
+ ERR(vreg_lfsr16(dev, 0x09, xEndCount));
+ ERR(vreg_lfsr16(dev, 0x0B, hSyncStart));
+ ERR(vreg_lfsr16(dev, 0x0D, hSyncEnd));
+ ERR(vreg_big_endian(dev, 0x0F, hPixels));
+ ERR(vreg_lfsr16(dev, 0x11, yEndCount));
+ ERR(vreg_lfsr16(dev, 0x13, vSyncStart));
+ ERR(vreg_lfsr16(dev, 0x15, vSyncEnd));
+ ERR(vreg_big_endian(dev, 0x17, vPixels));
+ ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
+
+ ERR(vreg(dev, 0x1F, 0));
+
+ ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
+ */
+static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
+{
+ u16 xds, yds;
+ u16 xde, yde;
+ u16 yec;
+
+ /* x display start */
+ xds = mode->crtc_htotal - mode->crtc_hsync_start;
+ wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
+ /* x display end */
+ xde = xds + mode->crtc_hdisplay;
+ wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
+
+ /* y display start */
+ yds = mode->crtc_vtotal - mode->crtc_vsync_start;
+ wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
+ /* y display end */
+ yde = yds + mode->crtc_vdisplay;
+ wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
+
+ /* x end count is active + blanking - 1 */
+ wrptr = udl_set_register_lfsr16(wrptr, 0x09,
+ mode->crtc_htotal - 1);
+
+ /* libdlo hardcodes hsync start to 1 */
+ wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
+
+ /* hsync end is width of sync pulse + 1 */
+ wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
+ mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
+
+ /* hpixels is active pixels */
+ wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
+
+ /* yendcount is vertical active + vertical blanking */
+ yec = mode->crtc_vtotal;
+ wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
+
+ /* libdlo hardcodes vsync start to 0 */
+ wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
+
+ /* vsync end is width of vsync pulse */
+ wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+ /* vpixels is active pixels */
+ wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
+
+ wrptr = udl_set_register_16be(wrptr, 0x1B,
+ mode->clock / 5);
+
+ return wrptr;
+}
+
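+/*
+ * Push the mode command buffer cached in udl->mode_buf (built by
+ * udl_crtc_mode_set()) to the device in a single bulk URB.
+ */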
+static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = dev->dev_private;
+ struct urb *urb;
+ char *buf;
+ int retval;
+
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return -ENOMEM;
+
+ buf = (char *)urb->transfer_buffer;
+
+ memcpy(buf, udl->mode_buf, udl->mode_buf_len);
+ retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
+ DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+ return retval;
+}
+
+
+static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = dev->dev_private;
+ int retval;
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ char *buf;
+ struct urb *urb;
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return;
+
+ buf = (char *)urb->transfer_buffer;
+ buf = udl_vidreg_lock(buf);
+ buf = udl_enable_hvsync(buf, false);
+ buf = udl_vidreg_unlock(buf);
+
+ retval = udl_submit_urb(dev, urb, buf - (char *)
+ urb->transfer_buffer);
+ } else {
+ if (udl->mode_buf_len == 0) {
+ DRM_ERROR("Trying to enable DPMS with no mode\n");
+ return;
+ }
+ udl_crtc_write_mode_to_hw(crtc);
+ }
+
+}
+
+static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+#if 0
+static int
+udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return 0;
+}
+
+static int
+udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return 0;
+}
+#endif
+
+static int udl_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct udl_framebuffer *ufb = to_udl_fb(crtc->fb);
+ struct udl_device *udl = dev->dev_private;
+ char *buf;
+ char *wrptr;
+ int color_depth = 0;
+
+ buf = (char *)udl->mode_buf;
+
+ /* for now we just clip 24 -> 16 - if we fix that fix this */
+ /*if (crtc->fb->bits_per_pixel != 16)
+ color_depth = 1; */
+
+ /* This first section has to do with setting the base address on the
+ * controller associated with the display. There are two base
+ * pointers; currently, we only use the 16 bpp segment.
+ */
+ wrptr = udl_vidreg_lock(buf);
+ wrptr = udl_set_color_depth(wrptr, color_depth);
+ /* set base for 16bpp segment to 0 */
+ wrptr = udl_set_base16bpp(wrptr, 0);
+ /* set base for 8bpp segment to end of fb */
+ wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
+
+ wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
+ wrptr = udl_enable_hvsync(wrptr, true);
+ wrptr = udl_vidreg_unlock(wrptr);
+
+ ufb->active_16 = true;
+ if (old_fb) {
+ struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
+ uold_fb->active_16 = false;
+ }
+ udl->mode_buf_len = wrptr - buf;
+
+ /* damage all of it */
+ udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+ return 0;
+}
+
+
+static void udl_crtc_disable(struct drm_crtc *crtc)
+{
+}
+
+static void udl_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static void udl_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void udl_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void udl_crtc_commit(struct drm_crtc *crtc)
+{
+ udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static struct drm_crtc_helper_funcs udl_helper_funcs = {
+ .dpms = udl_crtc_dpms,
+ .mode_fixup = udl_crtc_mode_fixup,
+ .mode_set = udl_crtc_mode_set,
+ .prepare = udl_crtc_prepare,
+ .commit = udl_crtc_commit,
+ .disable = udl_crtc_disable,
+ .load_lut = udl_load_lut,
+};
+
+static const struct drm_crtc_funcs udl_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = udl_crtc_destroy,
+};
+
+int udl_crtc_init(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
+ if (crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, crtc, &udl_crtc_funcs);
+ drm_crtc_helper_add(crtc, &udl_helper_funcs);
+
+ return 0;
+}
+
+static const struct drm_mode_config_funcs udl_mode_funcs = {
+ .fb_create = udl_fb_user_fb_create,
+ .output_poll_changed = NULL,
+};
+
+int udl_modeset_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 640;
+ dev->mode_config.min_height = 480;
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.prefer_shadow = 0;
+ dev->mode_config.preferred_depth = 24;
+
+ dev->mode_config.funcs = (void *)&udl_mode_funcs;
+
+ drm_mode_create_dirty_info_property(dev);
+
+ udl_crtc_init(dev);
+
+ encoder = udl_encoder_init(dev);
+
+ udl_connector_init(dev, encoder);
+
+ return 0;
+}
+
+void udl_modeset_cleanup(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
new file mode 100644
index 0000000..b9320e2
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/prefetch.h>
+
+#include "drmP.h"
+#include "udl_drv.h"
+
+#define MAX_CMD_PIXELS 255
+
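+/* An rlx header is 0xAF 0x6B, three address bytes, a command pixel count
+ * byte and the first raw-run count byte; see udl_compress_hline16() below. */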
+#define RLX_HEADER_BYTES 7
+#define MIN_RLX_PIX_BYTES 4
+#define MIN_RLX_CMD_BYTES (RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES)
+
+#define RLE_HEADER_BYTES 6
+#define MIN_RLE_PIX_BYTES 3
+#define MIN_RLE_CMD_BYTES (RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES)
+
+#define RAW_HEADER_BYTES 6
+#define MIN_RAW_PIX_BYTES 2
+#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
+
+/*
+ * Trims identical data from front and back of line
+ * Sets new front buffer address and width
+ * And returns byte count of identical pixels
+ * Assumes CPU natural alignment (unsigned long)
+ * for back and front buffer ptrs and width
+ */
+#if 0
+static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
+{
+ int j, k;
+ const unsigned long *back = (const unsigned long *) bback;
+ const unsigned long *front = (const unsigned long *) *bfront;
+ const int width = *width_bytes / sizeof(unsigned long);
+ int identical = width;
+ int start = width;
+ int end = width;
+
+ prefetch((void *) front);
+ prefetch((void *) back);
+
+ for (j = 0; j < width; j++) {
+ if (back[j] != front[j]) {
+ start = j;
+ break;
+ }
+ }
+
+ for (k = width - 1; k > j; k--) {
+ if (back[k] != front[k]) {
+ end = k+1;
+ break;
+ }
+ }
+
+ identical = start + (width - end);
+ *bfront = (u8 *) &front[start];
+ *width_bytes = (end - start) * sizeof(unsigned long);
+
+ return identical * sizeof(unsigned long);
+}
+#endif
+
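+/*
+ * Convert one XRGB8888 pixel to RGB565 by keeping the top bits of each
+ * channel; the caller byte-swaps the result to the device's byte order.
+ */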
+static inline u16 pixel32_to_be16p(const uint8_t *pixel)
+{
+ uint32_t pix = *(uint32_t *)pixel;
+ u16 retval;
+
+ retval = (((pix >> 3) & 0x001f) |
+ ((pix >> 5) & 0x07e0) |
+ ((pix >> 8) & 0xf800));
+ return retval;
+}
+
+/*
+ * Render a command stream for an encoded horizontal line segment of pixels.
+ *
+ * A command buffer holds several commands.
+ * It always begins with a fresh command header
+ * (the protocol doesn't require this, but we enforce it to allow
+ * multiple buffers to be potentially encoded and sent in parallel).
+ * A single command encodes one contiguous horizontal line of pixels
+ *
+ * The function relies on the client to do all allocation, so that
+ * rendering can be done directly to output buffers (e.g. USB URBs).
+ * The function fills the supplied command buffer, providing information
+ * on where it left off, so the client may call in again with additional
+ * buffers if the line will take several buffers to complete.
+ *
+ * A single command can transmit a maximum of 256 pixels,
+ * regardless of the compression ratio (protocol design limit).
+ * To the hardware, 0 for a size byte means 256
+ *
+ * Rather than 256 pixel commands which are either rl or raw encoded,
+ * the rlx command simply assumes alternating raw and rl spans within one cmd.
+ * This has a slightly larger header overhead, but produces more even results.
+ * It also processes all data (read and write) in a single pass.
+ * Performance benchmarks of common cases show it having just slightly better
+ * compression than 256-pixel raw or RLE commands, with similar CPU consumption.
+ * For very RL-friendly data, however, it will not compress quite as well.
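+ *
+ * Each command emitted below is laid out as: 0xAF 0x6B, a 24-bit device
+ * address (high byte first), a total pixel count byte, then alternating
+ * <raw count byte> <raw 16-bit pixels...> <repeat count byte> spans.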
+ */
+static void udl_compress_hline16(
+ const u8 **pixel_start_ptr,
+ const u8 *const pixel_end,
+ uint32_t *device_address_ptr,
+ uint8_t **command_buffer_ptr,
+ const uint8_t *const cmd_buffer_end, int bpp)
+{
+ const u8 *pixel = *pixel_start_ptr;
+ uint32_t dev_addr = *device_address_ptr;
+ uint8_t *cmd = *command_buffer_ptr;
+
+ while ((pixel_end > pixel) &&
+ (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
+ uint8_t *raw_pixels_count_byte = 0;
+ uint8_t *cmd_pixels_count_byte = 0;
+ const u8 *raw_pixel_start = 0;
+ const u8 *cmd_pixel_start, *cmd_pixel_end = 0;
+
+ prefetchw((void *) cmd); /* pull in one cache line at least */
+
+ *cmd++ = 0xaf;
+ *cmd++ = 0x6b;
+ *cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
+ *cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
+ *cmd++ = (uint8_t) ((dev_addr) & 0xFF);
+
+ cmd_pixels_count_byte = cmd++; /* we'll know this later */
+ cmd_pixel_start = pixel;
+
+ raw_pixels_count_byte = cmd++; /* we'll know this later */
+ raw_pixel_start = pixel;
+
+ cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
+ min((int)(pixel_end - pixel) / bpp,
+ (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+
+ prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+
+ while (pixel < cmd_pixel_end) {
+ const u8 * const repeating_pixel = pixel;
+
+ if (bpp == 2)
+ *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel);
+ else if (bpp == 4)
+ *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel));
+
+ cmd += 2;
+ pixel += bpp;
+
+ if (unlikely((pixel < cmd_pixel_end) &&
+ (!memcmp(pixel, repeating_pixel, bpp)))) {
+ /* go back and fill in raw pixel count */
+ *raw_pixels_count_byte = (((repeating_pixel -
+ raw_pixel_start) / bpp) + 1) & 0xFF;
+
+ while ((pixel < cmd_pixel_end)
+ && (!memcmp(pixel, repeating_pixel, bpp))) {
+ pixel += bpp;
+ }
+
+ /* immediately after raw data is repeat byte */
+ *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF;
+
+ /* Then start another raw pixel span */
+ raw_pixel_start = pixel;
+ raw_pixels_count_byte = cmd++;
+ }
+ }
+
+ if (pixel > raw_pixel_start) {
+ /* finalize last RAW span */
+ *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+ }
+
+ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
+ dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+ }
+
+ if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
+ /* Fill leftover bytes with no-ops */
+ if (cmd_buffer_end > cmd)
+ memset(cmd, 0xAF, cmd_buffer_end - cmd);
+ cmd = (uint8_t *) cmd_buffer_end;
+ }
+
+ *command_buffer_ptr = cmd;
+ *pixel_start_ptr = pixel;
+ *device_address_ptr = dev_addr;
+
+ return;
+}
+
+/*
+ * There are 3 copies of every pixel: The front buffer that the fbdev
+ * client renders to, the actual framebuffer across the USB bus in hardware
+ * (that we can only write to, slowly, and can never read), and (optionally)
+ * our shadow copy that tracks what's been sent to that hardware buffer.
+ */
+int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+ const char *front, char **urb_buf_ptr,
+ u32 byte_offset, u32 byte_width,
+ int *ident_ptr, int *sent_ptr)
+{
+ const u8 *line_start, *line_end, *next_pixel;
+ u32 base16 = 0 + (byte_offset / bpp) * 2;
+ struct urb *urb = *urb_ptr;
+ u8 *cmd = *urb_buf_ptr;
+ u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+
+ line_start = (u8 *) (front + byte_offset);
+ next_pixel = line_start;
+ line_end = next_pixel + byte_width;
+
+ while (next_pixel < line_end) {
+
+ udl_compress_hline16(&next_pixel,
+ line_end, &base16,
+ (u8 **) &cmd, (u8 *) cmd_end, bpp);
+
+ if (cmd >= cmd_end) {
+ int len = cmd - (u8 *) urb->transfer_buffer;
+ if (udl_submit_urb(dev, urb, len))
+ return 1; /* lost pixels is set */
+ *sent_ptr += len;
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return 1; /* lost_pixels is set */
+ *urb_ptr = urb;
+ cmd = urb->transfer_buffer;
+ cmd_end = &cmd[urb->transfer_buffer_length];
+ }
+ }
+
+ *urb_buf_ptr = cmd;
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index a2ab343..1f18225 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -106,6 +106,8 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
idr_init(&dev->object_name_idr);
+ pci_set_master(dev->pdev);
+
ret = drm_vblank_init(dev, 1);
if (ret) {
kfree(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2d6f573..ee24d21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -38,6 +38,10 @@
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
+#define VMW_MIN_INITIAL_WIDTH 800
+#define VMW_MIN_INITIAL_HEIGHT 600
+
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -387,6 +391,41 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
BUG_ON(n3d < 0);
}
+/**
+ * Sets the initial_[width|height] fields on the given vmw_private.
+ *
+ * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers, raising
+ * the values to at least VMW_MIN_INITIAL_[WIDTH|HEIGHT], and falling
+ * back to VMW_MIN_INITIAL_[WIDTH|HEIGHT] entirely if they exceed the
+ * fb_max_[width|height] fields.
+ */
+static void vmw_get_initial_size(struct vmw_private *dev_priv)
+{
+ uint32_t width;
+ uint32_t height;
+
+ width = vmw_read(dev_priv, SVGA_REG_WIDTH);
+ height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
+
+ width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
+ height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
+
+ if (width > dev_priv->fb_max_width ||
+ height > dev_priv->fb_max_height) {
+
+ /*
+ * This is a host error and shouldn't occur.
+ */
+
+ width = VMW_MIN_INITIAL_WIDTH;
+ height = VMW_MIN_INITIAL_HEIGHT;
+ }
+
+ dev_priv->initial_width = width;
+ dev_priv->initial_height = height;
+}
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -400,6 +439,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
memset(dev_priv, 0, sizeof(*dev_priv));
+ pci_set_master(dev->pdev);
+
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
@@ -441,6 +482,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+ vmw_get_initial_size(dev_priv);
+
if (dev_priv->capabilities & SVGA_CAP_GMR) {
dev_priv->max_gmr_descriptors =
vmw_read(dev_priv,
@@ -688,6 +732,15 @@ static int vmw_driver_unload(struct drm_device *dev)
return 0;
}
+static void vmw_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
+}
+
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -710,6 +763,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp == NULL))
return ret;
+ INIT_LIST_HEAD(&vmw_fp->fence_events);
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
@@ -1102,6 +1156,7 @@ static struct drm_driver driver = {
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
+ .preclose = vmw_preclose,
.postclose = vmw_postclose,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index dc27970..d0f2c07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include "ttm/ttm_module.h"
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20111025"
+#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 3
+#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -62,6 +62,7 @@
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
+ struct list_head fence_events;
};
struct vmw_dma_buffer {
@@ -202,6 +203,8 @@ struct vmw_private {
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
+ uint32_t initial_width;
+ uint32_t initial_height;
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
@@ -533,7 +536,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
uint32_t command_size,
uint64_t throttle_us,
struct drm_vmw_fence_rep __user
- *user_fence_rep);
+ *user_fence_rep,
+ struct vmw_fence_obj **out_fence);
extern void
vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 40932fb..4acced4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1109,10 +1109,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
- struct drm_vmw_fence_rep __user *user_fence_rep)
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct vmw_fence_obj **out_fence)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
- struct vmw_fence_obj *fence;
+ struct vmw_fence_obj *fence = NULL;
uint32_t handle;
void *cmd;
int ret;
@@ -1208,8 +1209,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
- if (likely(fence != NULL))
+ /* Don't unreference when handing fence out */
+ if (unlikely(out_fence != NULL)) {
+ *out_fence = fence;
+ fence = NULL;
+ } else if (likely(fence != NULL)) {
vmw_fence_obj_unreference(&fence);
+ }
mutex_unlock(&dev_priv->cmdbuf_mutex);
return 0;
@@ -1362,7 +1368,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep);
+ (void __user *)(unsigned long)arg->fence_rep,
+ NULL);
if (unlikely(ret != 0))
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 34e51a1..3c447bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -414,10 +414,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
- /* XXX These shouldn't be hardcoded. */
- initial_width = 800;
- initial_height = 600;
-
fb_bpp = 32;
fb_depth = 24;
@@ -425,8 +421,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- initial_width = min(fb_width, initial_width);
- initial_height = min(fb_height, initial_height);
+ initial_width = min(vmw_priv->initial_width, fb_width);
+ initial_height = min(vmw_priv->initial_height, fb_height);
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
@@ -515,19 +511,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres = initial_width;
info->var.yres = initial_height;
-#if 0
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
-#else
- info->pixmap.size = 0;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
-#endif
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 15fb260..f2fb8f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -69,12 +69,13 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct drm_pending_event e;
struct vmw_fence_action action;
+ struct list_head fpriv_head;
+
+ struct drm_pending_event *event;
struct vmw_fence_obj *fence;
struct drm_device *dev;
- struct kref kref;
- uint32_t size;
+
uint32_t *tv_sec;
uint32_t *tv_usec;
};
@@ -784,46 +785,40 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_event_fence_action_destroy
- *
- * @kref: The struct kref embedded in a struct vmw_event_fence_action.
- *
- * The vmw_event_fence_action destructor that may be called either after
- * the fence action cleanup, or when the event is delivered.
- * It frees both the vmw_event_fence_action struct and the actual
- * event structure copied to user-space.
- */
-static void vmw_event_fence_action_destroy(struct kref *kref)
-{
- struct vmw_event_fence_action *eaction =
- container_of(kref, struct vmw_event_fence_action, kref);
- struct ttm_mem_global *mem_glob =
- vmw_mem_glob(vmw_priv(eaction->dev));
- uint32_t size = eaction->size;
-
- kfree(eaction->e.event);
- kfree(eaction);
- ttm_mem_global_free(mem_glob, size);
-}
-
-
-/**
- * vmw_event_fence_action_delivered
+ * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
*
- * @e: The struct drm_pending_event embedded in a struct
- * vmw_event_fence_action.
+ * @fman: Pointer to a struct vmw_fence_manager
+ * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
+ * with pointers to a struct drm_file object about to be closed.
*
- * The struct drm_pending_event destructor that is called by drm
- * once the event is delivered. Since we don't know whether this function
- * will be called before or after the fence action destructor, we
- * free a refcount and destroy if it becomes zero.
+ * This function removes all pending fence events with references to a
+ * specific struct drm_file object about to be closed. The caller is required
+ * to pass a list of all struct vmw_event_fence_action objects with such
+ * events attached. This function is typically called before the
+ * struct drm_file object's event management is taken down.
*/
-static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+ struct list_head *event_list)
{
- struct vmw_event_fence_action *eaction =
- container_of(e, struct vmw_event_fence_action, e);
+ struct vmw_event_fence_action *eaction;
+ struct drm_pending_event *event;
+ unsigned long irq_flags;
- kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+ while (1) {
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ if (list_empty(event_list))
+ goto out_unlock;
+ eaction = list_first_entry(event_list,
+ struct vmw_event_fence_action,
+ fpriv_head);
+ list_del_init(&eaction->fpriv_head);
+ event = eaction->event;
+ eaction->event = NULL;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+ event->destroy(event);
+ }
+out_unlock:
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
}
@@ -836,18 +831,21 @@ static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context, and may be called
- * from irq context. It ups a refcount reflecting that we now have two
- * destructors.
+ * from irq context.
*/
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
- struct drm_file *file_priv = eaction->e.file_priv;
+ struct drm_pending_event *event = eaction->event;
+ struct drm_file *file_priv;
unsigned long irq_flags;
- kref_get(&eaction->kref);
+ if (unlikely(event == NULL))
+ return;
+
+ file_priv = event->file_priv;
spin_lock_irqsave(&dev->event_lock, irq_flags);
if (likely(eaction->tv_sec != NULL)) {
@@ -858,7 +856,9 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
*eaction->tv_usec = tv.tv_usec;
}
- list_add_tail(&eaction->e.link, &file_priv->event_list);
+ list_del_init(&eaction->fpriv_head);
+ list_add_tail(&eaction->event->link, &file_priv->event_list);
+ eaction->event = NULL;
wake_up_all(&file_priv->event_wait);
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}
@@ -876,9 +876,15 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
+ struct vmw_fence_manager *fman = eaction->fence->fman;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ list_del(&eaction->fpriv_head);
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
vmw_fence_obj_unreference(&eaction->fence);
- kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+ kfree(eaction);
}
@@ -946,39 +952,23 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
* an error code, the caller needs to free that object.
*/
-int vmw_event_fence_action_create(struct drm_file *file_priv,
- struct vmw_fence_obj *fence,
- struct drm_event *event,
- uint32_t *tv_sec,
- uint32_t *tv_usec,
- bool interruptible)
+int vmw_event_fence_action_queue(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ struct drm_pending_event *event,
+ uint32_t *tv_sec,
+ uint32_t *tv_usec,
+ bool interruptible)
{
struct vmw_event_fence_action *eaction;
- struct ttm_mem_global *mem_glob =
- vmw_mem_glob(fence->fman->dev_priv);
struct vmw_fence_manager *fman = fence->fman;
- uint32_t size = fman->event_fence_action_size +
- ttm_round_pot(event->length);
- int ret;
-
- /*
- * Account for internal structure size as well as the
- * event size itself.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
- if (unlikely(ret != 0))
- return ret;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ unsigned long irq_flags;
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL)) {
- ttm_mem_global_free(mem_glob, size);
+ if (unlikely(eaction == NULL))
return -ENOMEM;
- }
- eaction->e.event = event;
- eaction->e.file_priv = file_priv;
- eaction->e.destroy = vmw_event_fence_action_delivered;
+ eaction->event = event;
eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
eaction->action.cleanup = vmw_event_fence_action_cleanup;
@@ -986,16 +976,89 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = fman->dev_priv->dev;
- eaction->size = size;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- kref_init(&eaction->kref);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
vmw_fence_obj_add_action(fence, &eaction->action);
return 0;
}
+struct vmw_event_fence_pending {
+ struct drm_pending_event base;
+ struct drm_vmw_event_fence event;
+};
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ uint32_t flags,
+ uint64_t user_data,
+ bool interruptible)
+{
+ struct vmw_event_fence_pending *event;
+ struct drm_device *dev = fence->fman->dev_priv->dev;
+ unsigned long irq_flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+ ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
+ if (likely(ret == 0))
+ file_priv->event_space -= sizeof(event->event);
+
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate event space for this file.\n");
+ goto out_no_space;
+ }
+
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (unlikely(event == NULL)) {
+ DRM_ERROR("Failed to allocate an event.\n");
+ ret = -ENOMEM;
+ goto out_no_event;
+ }
+
+ event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+ event->event.base.length = sizeof(*event);
+ event->event.user_data = user_data;
+
+ event->base.event = &event->event.base;
+ event->base.file_priv = file_priv;
+ event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+
+ if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ interruptible);
+ else
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ NULL,
+ NULL,
+ interruptible);
+ if (ret != 0)
+ goto out_no_queue;
+
+ return 0;
+
+out_no_queue:
+ event->base.destroy(&event->base);
+out_no_event:
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+ file_priv->event_space += sizeof(event->event);
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_space:
+ return ret;
+}
+
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1008,8 +1071,6 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
uint32_t handle;
- unsigned long irq_flags;
- struct drm_vmw_event_fence *event;
int ret;
/*
@@ -1062,59 +1123,28 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
BUG_ON(fence == NULL);
- spin_lock_irqsave(&dev->event_lock, irq_flags);
-
- ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
- if (likely(ret == 0))
- file_priv->event_space -= sizeof(*event);
-
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate event space for this file.\n");
- goto out_no_event_space;
- }
-
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
- DRM_ERROR("Failed to allocate an event.\n");
- goto out_no_event;
- }
-
- event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
- event->base.length = sizeof(*event);
- event->user_data = arg->user_data;
-
if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
ret = vmw_event_fence_action_create(file_priv, fence,
- &event->base,
- &event->tv_sec,
- &event->tv_usec,
+ arg->flags,
+ arg->user_data,
true);
else
ret = vmw_event_fence_action_create(file_priv, fence,
- &event->base,
- NULL,
- NULL,
+ arg->flags,
+ arg->user_data,
true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
- goto out_no_attach;
+ goto out_no_create;
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
handle);
vmw_fence_obj_unreference(&fence);
return 0;
-out_no_attach:
- kfree(event);
-out_no_event:
- spin_lock_irqsave(&dev->event_lock, irq_flags);
- file_priv->event_space += sizeof(*event);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-out_no_event_space:
+out_no_create:
if (user_fence_rep != NULL)
ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 0854a20..faf2e78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -109,5 +109,12 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+ struct list_head *event_list);
+extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ struct drm_pending_event *event,
+ uint32_t *tv_sec,
+ uint32_t *tv_usec,
+ bool interruptible);
#endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b66ef0e..2286d47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -422,7 +422,8 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc)
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
@@ -542,12 +543,15 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
if (num == 0)
continue;
+ /* only return the last fence */
+ if (out_fence && *out_fence)
+ vmw_fence_obj_unreference(out_fence);
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, out_fence);
if (unlikely(ret != 0))
break;
@@ -598,7 +602,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
flags, color,
- clips, num_clips, inc);
+ clips, num_clips, inc, NULL);
ttm_read_unlock(&vmaster->lock);
return 0;
@@ -809,7 +813,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
cmd->body.ptr.offset = 0;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, NULL);
kfree(cmd);
@@ -821,7 +825,8 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
+ unsigned num_clips, int increment,
+ struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
@@ -894,9 +899,13 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
if (hit_num == 0)
continue;
+ /* only return the last fence */
+ if (out_fence && *out_fence)
+ vmw_fence_obj_unreference(out_fence);
+
fifo_size = sizeof(*blits) * hit_num;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, out_fence);
if (unlikely(ret != 0))
break;
@@ -942,7 +951,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
} else {
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
flags, color,
- clips, num_clips, increment);
+ clips, num_clips, increment, NULL);
}
ttm_read_unlock(&vmaster->lock);
@@ -1296,7 +1305,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, NULL);
if (unlikely(ret != 0))
break;
@@ -1409,7 +1418,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep);
+ 0, user_fence_rep, NULL);
kfree(cmd);
@@ -1672,6 +1681,70 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
+int vmw_du_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct drm_framebuffer *old_fb = crtc->fb;
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+ struct drm_file *file_priv = event->base.file_priv;
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_clip_rect clips;
+ int ret;
+
+ /* require ScreenObject support for page flipping */
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
+ if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
+ return -EINVAL;
+
+ crtc->fb = fb;
+
+ /* do a full screen dirty update */
+ clips.x1 = clips.y1 = 0;
+ clips.x2 = fb->width;
+ clips.y2 = fb->height;
+
+ if (vfb->dmabuf)
+ ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
+ 0, 0, &clips, 1, 1, &fence);
+ else
+ ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
+ 0, 0, &clips, 1, 1, &fence);
+
+
+ if (ret != 0)
+ goto out_no_fence;
+ if (!fence) {
+ ret = -EINVAL;
+ goto out_no_fence;
+ }
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+
+ /*
+ * No need to hold on to this now. The only cleanup
+ * we need to do if we fail is unref the fence.
+ */
+ vmw_fence_obj_unreference(&fence);
+
+ if (vmw_crtc_to_du(crtc)->is_implicit)
+ vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
+
+ return ret;
+
+out_no_fence:
+ crtc->fb = old_fb;
+ return ret;
+}
+
+
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
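
With vmw_du_page_flip wired up as the crtc's .page_flip hook (see the vmwgfx_scrn.c hunk below), the flip is driven through the generic DRM page-flip ioctl. A minimal userspace sketch using libdrm, assuming fd, crtc_id and fb_id are already set up by the caller; note that this driver needs DRM_MODE_PAGE_FLIP_EVENT since it attaches the completion event to the fence:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int sequence, unsigned int tv_sec,
                      unsigned int tv_usec, void *user_data)
{
        /* tv_sec/tv_usec carry the timestamp filled in when the fence signaled */
}

static int flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
        drmEventContext evctx = {
                .version = DRM_EVENT_CONTEXT_VERSION,
                .page_flip_handler = flip_done,
        };
        int ret;

        ret = drmModePageFlip(fd, crtc_id, fb_id,
                              DRM_MODE_PAGE_FLIP_EVENT, NULL);
        if (ret)
                return ret;

        return drmHandleEvent(fd, &evctx); /* blocks until the flip event arrives */
}
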
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a4f7f03..8184bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -121,6 +121,9 @@ struct vmw_display_unit {
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -154,5 +157,10 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f77b184..070fb23 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -354,8 +354,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
INIT_LIST_HEAD(&ldu->active);
ldu->base.pref_active = (unit == 0);
- ldu->base.pref_width = 800;
- ldu->base.pref_height = 600;
+ ldu->base.pref_width = dev_priv->initial_width;
+ ldu->base.pref_height = dev_priv->initial_height;
ldu->base.pref_mode = NULL;
ldu->base.is_implicit = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4defdcf..6deaf2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -394,6 +394,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.set_config = vmw_sou_crtc_set_config,
+ .page_flip = vmw_du_page_flip,
};
/*
@@ -448,8 +449,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->active_implicit = false;
sou->base.pref_active = (unit == 0);
- sou->base.pref_width = 800;
- sou->base.pref_height = 600;
+ sou->base.pref_width = dev_priv->initial_width;
+ sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
sou->base.is_implicit = true;
@@ -535,3 +536,36 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
return 0;
}
+
+/**
+ * Returns whether this display unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ if (!sou->base.is_implicit)
+ return true;
+
+ if (dev_priv->sou_priv->num_implicit != 1)
+ return false;
+
+ return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ BUG_ON(!sou->base.is_implicit);
+
+ dev_priv->sou_priv->implicit_fb =
+ vmw_framebuffer_to_vfb(sou->base.crtc.fb);
+}
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 24f94f4..acba1c6 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -616,10 +616,11 @@ static u32 bit_func(struct i2c_adapter *adap)
/* -----exported algorithm data: ------------------------------------- */
-static const struct i2c_algorithm i2c_bit_algo = {
+const struct i2c_algorithm i2c_bit_algo = {
.master_xfer = bit_xfer,
.functionality = bit_func,
};
+EXPORT_SYMBOL(i2c_bit_algo);
/*
* registering functions to load algorithms at runtime
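
Exporting i2c_bit_algo gives GPU drivers a cheap way to tell a GPIO bit-banged adapter apart from a hardware I2C controller, for example to decide whether a slow fallback path is already in use. A minimal sketch, not part of this patch:

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

static bool adapter_is_bit_banged(const struct i2c_adapter *adapter)
{
        /* the exported algorithm acts as an identity marker */
        return adapter->algo == &i2c_bit_algo;
}
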
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index ac9141b..c6ce416 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1665,6 +1665,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (ret)
return -EINVAL;
+ unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
@@ -1672,7 +1673,6 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
registered_fb[i] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
- device_destroy(fb_class, MKDEV(FB_MAJOR, i));
event.info = fb_info;
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
@@ -1681,6 +1681,22 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
return 0;
}
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+ int i;
+
+ i = fb_info->node;
+ if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
+ return -EINVAL;
+
+ if (fb_info->dev) {
+ device_destroy(fb_class, MKDEV(FB_MAJOR, i));
+ fb_info->dev = NULL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(unlink_framebuffer);
+
void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index a197731..a40c05e 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1739,7 +1739,7 @@ static void dlfb_usb_disconnect(struct usb_interface *interface)
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
device_remove_file(info->dev, &fb_device_attrs[i]);
device_remove_bin_file(info->dev, &edid_attr);
-
+ unlink_framebuffer(info);
usb_set_intfdata(interface, NULL);
/* if clients still have us open, will be freed on last close */
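
unlink_framebuffer() splits device-node teardown out of unregister_framebuffer() so a hot-unpluggable driver (udlfb above) can drop /dev/fbN as soon as the hardware disappears while keeping the fb_info alive for clients that still hold it open. A hedged sketch of that split; my_disconnect() and my_last_release() are hypothetical driver callbacks:

#include <linux/fb.h>

static void my_disconnect(struct fb_info *info)
{
        unlink_framebuffer(info);  /* /dev/fbN goes away immediately */
        /* mark the hardware as gone; keep info around for open clients */
}

static void my_last_release(struct fb_info *info)
{
        unregister_framebuffer(info);  /* safe now: the node is already unlinked */
}
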
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 49d94ed..34a7b89 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -761,6 +761,8 @@ struct drm_event_vblank {
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
/* typedef area */
#ifndef __KERNEL__
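
The two new capability flags let userspace ask what kind of dumb buffer the driver prefers: drivers are expected to fill dev->mode_config.preferred_depth and prefer_shadow (added in the drm_crtc.h hunk below), and the values are reported through the GET_CAP ioctl. A small userspace sketch using libdrm's drmGetCap(); it assumes the new defines are visible through the installed headers:

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

static void query_dumb_hints(int fd)
{
        uint64_t depth = 0, shadow = 0;

        drmGetCap(fd, DRM_CAP_DUMB_PREFERRED_DEPTH, &depth);
        drmGetCap(fd, DRM_CAP_DUMB_PREFER_SHADOW, &shadow);

        /* depth == 0 means no preference; a non-zero shadow hint means dumb
         * mappings are slow and rendering into a malloc'ed shadow buffer
         * followed by a copy is the better plan. */
        printf("preferred depth %llu, prefer shadow %llu\n",
               (unsigned long long)depth, (unsigned long long)shadow);
}
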
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 92f0981..574bd1c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1170,6 +1170,8 @@ struct drm_device {
struct idr object_name_idr;
/*@} */
int switch_power_state;
+
+ atomic_t unplugged; /* device has been unplugged or gone away */
};
#define DRM_SWITCH_POWER_ON 0
@@ -1235,6 +1237,19 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
}
#endif
+static inline void drm_device_set_unplugged(struct drm_device *dev)
+{
+ smp_wmb();
+ atomic_set(&dev->unplugged, 1);
+}
+
+static inline int drm_device_is_unplugged(struct drm_device *dev)
+{
+ int ret = atomic_read(&dev->unplugged);
+ smp_rmb();
+ return ret;
+}
+
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@@ -1264,11 +1279,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
-extern void drm_mem_init(void);
-extern int drm_mem_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
-
extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
@@ -1383,12 +1393,8 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
/* IRQ support (drm_irq.h) */
extern int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_install(struct drm_device *dev);
extern int drm_irq_uninstall(struct drm_device *dev);
-extern void drm_driver_irq_preinstall(struct drm_device *dev);
-extern void drm_driver_irq_postinstall(struct drm_device *dev);
-extern void drm_driver_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
@@ -1464,6 +1470,7 @@ extern void drm_master_put(struct drm_master **master);
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
+extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
extern unsigned int drm_vblank_offdelay;
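
The unplugged flag plus drm_unplug_dev() let the DRM core keep a drm_device object around after the backing hardware (typically a USB display adapter) is gone, failing new work instead of crashing. A minimal sketch of how an entry point is expected to check it, mirroring what the core ioctl and mmap paths do in this series; my_driver_ioctl() is a hypothetical handler:

#include "drmP.h"

static int my_driver_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        if (drm_device_is_unplugged(dev))
                return -ENODEV; /* hardware is gone; fail gracefully */

        /* ... normal ioctl work ... */
        return 0;
}
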
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 4cd4be2..e250eda 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -121,7 +121,7 @@ struct drm_display_mode {
char name[DRM_DISPLAY_MODE_LEN];
enum drm_mode_status status;
- int type;
+ unsigned int type;
/* Proposed mode values */
int clock; /* in kHz */
@@ -257,7 +257,7 @@ struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
unsigned int length;
- void *data;
+ unsigned char data[];
};
struct drm_property_enum {
@@ -796,6 +796,9 @@ struct drm_mode_config {
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
struct drm_property *dirty_info_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -807,23 +810,29 @@ struct drm_mode_config {
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
-extern void drm_crtc_init(struct drm_device *dev,
- struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
-extern void drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type);
+extern int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
extern void drm_connector_cleanup(struct drm_connector *connector);
+/* helper to unplug all connectors of a device from sysfs */
+extern void drm_connector_unplug_all(struct drm_device *dev);
-extern void drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type);
+extern int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
@@ -848,6 +857,7 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
@@ -862,7 +872,7 @@ extern int drm_mode_height(struct drm_display_mode *mode);
/* for us by fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
struct drm_crtc *crtc,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
@@ -904,6 +914,13 @@ extern int drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
+extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
@@ -919,7 +936,7 @@ extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
-extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
@@ -995,6 +1012,7 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
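
drm_property_create_enum() and drm_property_create_range() fold the usual drm_property_create() plus per-value drm_property_add_enum() boilerplate into one call. A sketch with a made-up "dithering" property; only the helper prototypes themselves come from this patch:

#include "drmP.h"
#include "drm_crtc.h"

static const struct drm_prop_enum_list dither_names[] = {
        { 0, "off" },
        { 1, "on" },
};

static void my_attach_dither_prop(struct drm_device *dev,
                                  struct drm_connector *connector)
{
        struct drm_property *prop;

        prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "dithering",
                                        dither_names, ARRAY_SIZE(dither_names));
        if (prop)
                drm_connector_attach_property(connector, prop, 0);
}
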
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 74ce916..bcb9a66 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -238,5 +238,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
+int drm_load_edid_firmware(struct drm_connector *connector);
#endif /* __DRM_EDID_H__ */
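
drm_load_edid_firmware() is the hook that lets a firmware-provided EDID override DDC probing. A sketch of the expected call site, assuming the function returns the number of modes it added and 0 when no override is configured; the helper-funcs access mirrors what a probe helper would do:

#include "drmP.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"

static int probe_connector_modes(struct drm_connector *connector)
{
        struct drm_connector_helper_funcs *helper = connector->helper_private;
        int count;

        /* a firmware/built-in EDID takes precedence over DDC probing */
        count = drm_load_edid_firmware(connector);
        if (count == 0)
                count = helper->get_modes(connector);

        return count;
}
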
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 6e3076a..5120b01 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -35,7 +35,6 @@ struct drm_fb_helper;
#include <linux/kgdb.h>
struct drm_fb_helper_crtc {
- uint32_t crtc_id;
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
};
@@ -74,7 +73,6 @@ struct drm_fb_helper {
int connector_count;
struct drm_fb_helper_connector **connector_info;
struct drm_fb_helper_funcs *funcs;
- int conn_limit;
struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head kernel_fb_list;
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 1ed3aae..3963116 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -74,16 +74,37 @@ struct drm_exynos_gem_mmap {
uint64_t mapped;
};
+/**
+ * A structure for a user connection request to the virtual display.
+ *
+ * @connection: indicates whether the user requests connection or disconnection.
+ * @extensions: if this value is 1, the vidi driver expects an additional
+ *	128 bytes of EDID data.
+ * @edid: pointer to the EDID data supplied by user space.
+ */
+struct drm_exynos_vidi_connection {
+ unsigned int connection;
+ unsigned int extensions;
+ uint64_t *edid;
+};
+
struct drm_exynos_plane_set_zpos {
__u32 plane_id;
__s32 zpos;
};
+/* memory type definitions. */
+enum e_drm_exynos_gem_mem_type {
+	/* Physically Non-Contiguous memory. */
+ EXYNOS_BO_NONCONTIG = 1 << 0
+};
+
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
+#define DRM_EXYNOS_VIDI_CONNECTION 0x07
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -97,6 +118,9 @@ struct drm_exynos_plane_set_zpos {
#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
+#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
+
#ifdef __KERNEL__
/**
@@ -147,11 +171,13 @@ struct exynos_drm_common_hdmi_pd {
* @timing: default video mode for initializing
* @default_win: default window layer number to be used for UI.
* @bpp: default bit per pixel.
+ * @is_v13: set if the HDMI version is 1.3.
*/
struct exynos_drm_hdmi_pdata {
struct fb_videomode timing;
unsigned int default_win;
unsigned int bpp;
+ unsigned int is_v13:1;
};
#endif /* __KERNEL__ */
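
A hedged userspace sketch of the new vidi ioctl, following the field descriptions added above; fd and edid_block are placeholders supplied by the caller:

#include <stdint.h>
#include <xf86drm.h>
#include "exynos_drm.h"

static int vidi_connect(int fd, uint64_t *edid_block)
{
        struct drm_exynos_vidi_connection conn = {
                .connection = 1,        /* 1 = connect, 0 = disconnect */
                .extensions = 0,        /* only the base 128-byte EDID block */
                .edid       = edid_block,
        };

        return drmIoctl(fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &conn);
}
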
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
index 1136867..884613e 100644
--- a/include/drm/gma_drm.h
+++ b/include/drm/gma_drm.h
@@ -83,9 +83,9 @@ struct drm_psb_gem_mmap {
#define DRM_GMA_GAMMA 0x04 /* Set gamma table */
#define DRM_GMA_ADB 0x05 /* Get backlight */
#define DRM_GMA_DPST_BL 0x06 /* Set backlight */
-#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x1 /* CRTC to physical pipe# */
#define DRM_GMA_MODE_OPERATION 0x07 /* Mode validation/DC set */
#define PSB_MODE_OPERATION_MODE_VALID 0x01
+#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x08 /* CRTC to physical pipe# */
#endif
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 924f6a4..da929bb 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -296,6 +296,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
typedef struct drm_i915_getparam {
int param;
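
I915_PARAM_HAS_LLC tells userspace whether the GPU shares the last-level cache with the CPU, which changes how buffers should be mapped and cached. A small libdrm sketch querying it:

#include <xf86drm.h>
#include "i915_drm.h"

static int gpu_has_llc(int fd)
{
        drm_i915_getparam_t gp;
        int has_llc = 0;

        gp.param = I915_PARAM_HAS_LLC;
        gp.value = &has_llc;
        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0;       /* older kernel: parameter unknown, assume no LLC */

        return has_llc;
}
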
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b174620..0a0001b 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -15,6 +15,10 @@ const struct intel_gtt {
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
+ /* Share the scratch page dma with ppgtts. */
+ dma_addr_t scratch_page_dma;
+ /* for ppgtt PDE access */
+ u32 __iomem *gtt;
} *intel_gtt_get(void);
void intel_gtt_chipset_flush(void);
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index b55da40..cb2f0c3 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -804,13 +804,23 @@ struct drm_radeon_gem_create {
uint32_t flags;
};
-#define RADEON_TILING_MACRO 0x1
-#define RADEON_TILING_MICRO 0x2
-#define RADEON_TILING_SWAP_16BIT 0x4
-#define RADEON_TILING_SWAP_32BIT 0x8
-#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
- * when mapped - i.e. front buffer */
-#define RADEON_TILING_MICRO_SQUARE 0x20
+#define RADEON_TILING_MACRO 0x1
+#define RADEON_TILING_MICRO 0x2
+#define RADEON_TILING_SWAP_16BIT 0x4
+#define RADEON_TILING_SWAP_32BIT 0x8
+/* this object requires a surface when mapped - i.e. front buffer */
+#define RADEON_TILING_SURFACE 0x10
+#define RADEON_TILING_MICRO_SQUARE 0x20
+#define RADEON_TILING_EG_BANKW_SHIFT 8
+#define RADEON_TILING_EG_BANKW_MASK 0xf
+#define RADEON_TILING_EG_BANKH_SHIFT 12
+#define RADEON_TILING_EG_BANKH_MASK 0xf
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
+#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
+#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
+#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
+#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
struct drm_radeon_gem_set_tiling {
uint32_t handle;
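
The new shift/mask pairs pack the evergreen bank width/height, macro-tile aspect and tile-split selects into the same 32-bit tiling_flags word used by the GEM set_tiling ioctl. A sketch of the packing; the field values are examples, not taken from this patch:

#include <stdint.h>
#include "radeon_drm.h"

static uint32_t radeon_eg_tiling_flags(uint32_t bankw, uint32_t bankh,
                                       uint32_t mtaspect, uint32_t tile_split)
{
        uint32_t flags = RADEON_TILING_MACRO;

        flags |= (bankw & RADEON_TILING_EG_BANKW_MASK)
                        << RADEON_TILING_EG_BANKW_SHIFT;
        flags |= (bankh & RADEON_TILING_EG_BANKH_MASK)
                        << RADEON_TILING_EG_BANKH_SHIFT;
        flags |= (mtaspect & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK)
                        << RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
        flags |= (tile_split & RADEON_TILING_EG_TILE_SPLIT_MASK)
                        << RADEON_TILING_EG_TILE_SPLIT_SHIFT;

        return flags;
}
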
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c18122f..a395b8c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1003,6 +1003,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
+extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
index 4f98148..584ffa0 100644
--- a/include/linux/i2c-algo-bit.h
+++ b/include/linux/i2c-algo-bit.h
@@ -49,5 +49,6 @@ struct i2c_algo_bit_data {
int i2c_bit_add_bus(struct i2c_adapter *);
int i2c_bit_add_numbered_bus(struct i2c_adapter *);
+extern const struct i2c_algorithm i2c_bit_algo;
#endif /* _LINUX_I2C_ALGO_BIT_H */
diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h
new file mode 100644
index 0000000..cd6a51c
--- /dev/null
+++ b/include/linux/i2c/tc35876x.h
@@ -0,0 +1,11 @@
+
+#ifndef _TC35876X_H
+#define _TC35876X_H
+
+struct tc35876x_platform_data {
+ int gpio_bridge_reset;
+ int gpio_panel_bl_en;
+ int gpio_panel_vadd;
+};
+
+#endif /* _TC35876X_H */
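
A hypothetical board-file sketch showing how the three GPIO numbers reach the tc35876x bridge driver; the I2C device name, address and GPIO numbers are placeholders and must match the driver's i2c_device_id table and the board wiring:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/tc35876x.h>

static struct tc35876x_platform_data tc35876x_pdata = {
        .gpio_bridge_reset = 94,        /* placeholder GPIO numbers */
        .gpio_panel_bl_en  = 45,
        .gpio_panel_vadd   = 110,
};

static struct i2c_board_info tc35876x_i2c_info __initdata = {
        I2C_BOARD_INFO("tc35876x", 0x0f),       /* placeholder name/address */
        .platform_data = &tc35876x_pdata,
};

/* registered from early board code, e.g. i2c_register_board_info(1, &tc35876x_i2c_info, 1); */
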